bnx2x: use netdev_alloc_frag()
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
23 #include <linux/ip.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
29 #include "bnx2x_sp.h"
30
31
32
33 /**
34  * bnx2x_move_fp - move content of the fastpath structure.
35  *
36  * @bp:         driver handle
37  * @from:       source FP index
38  * @to:         destination FP index
39  *
40  * Makes sure the contents of bp->fp[to].napi are kept
41  * intact. This is done by first copying the napi struct from
42  * the target to the source, and then mem copying the entire
43  * source onto the target. Update txdata pointers and related
44  * content.
45  */
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 {
48         struct bnx2x_fastpath *from_fp = &bp->fp[from];
49         struct bnx2x_fastpath *to_fp = &bp->fp[to];
50         struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51         struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52         struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53         struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54         int old_max_eth_txqs, new_max_eth_txqs;
55         int old_txdata_index = 0, new_txdata_index = 0;
56
57         /* Copy the NAPI object as it has been already initialized */
58         from_fp->napi = to_fp->napi;
59
60         /* Move bnx2x_fastpath contents */
61         memcpy(to_fp, from_fp, sizeof(*to_fp));
62         to_fp->index = to;
63
64         /* move sp_objs contents as well, as their indices match fp ones */
65         memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67         /* move fp_stats contents as well, as their indices match fp ones */
68         memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70         /* Update txdata pointers in fp and move txdata content accordingly:
71          * Each fp consumes 'max_cos' txdata structures, so the index should be
72          * decremented by max_cos x delta.
73          */
74
75         old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76         new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77                                 (bp)->max_cos;
78         if (from == FCOE_IDX(bp)) {
79                 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80                 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81         }
82
83         memcpy(&bp->bnx2x_txq[old_txdata_index],
84                &bp->bnx2x_txq[new_txdata_index],
85                sizeof(struct bnx2x_fp_txdata));
86         to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
87 }
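/* Worked example for the index arithmetic in bnx2x_move_fp() above
 * (hypothetical numbers, for illustration only): with 8 ETH queues,
 * max_cos = 3 and the FCoE fastpath moving from index 8 to index 6
 * (two ETH queues removed, delta = 2), the formulas give
 *
 *	old_max_eth_txqs = 8 * 3           = 24
 *	new_max_eth_txqs = (8 - 8 + 6) * 3 = 18
 *
 * so the FCoE txdata entry is relocated from slot
 * 24 + FCOE_TXQ_IDX_OFFSET to slot 18 + FCOE_TXQ_IDX_OFFSET, i.e. its
 * index shrinks by max_cos * delta exactly as the function comment
 * states.
 */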
88
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
90
91 /* free skb in the packet ring at pos idx
92  * return idx of last bd freed
93  */
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95                              u16 idx, unsigned int *pkts_compl,
96                              unsigned int *bytes_compl)
97 {
98         struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99         struct eth_tx_start_bd *tx_start_bd;
100         struct eth_tx_bd *tx_data_bd;
101         struct sk_buff *skb = tx_buf->skb;
102         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
103         int nbd;
104
105         /* prefetch skb end pointer to speed up dev_kfree_skb() */
106         prefetch(&skb->end);
107
108         DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
109            txdata->txq_index, idx, tx_buf, skb);
110
111         /* unmap first bd */
112         tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
115
116
117         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120                 BNX2X_ERR("BAD nbd!\n");
121                 bnx2x_panic();
122         }
123 #endif
124         new_cons = nbd + tx_buf->first_bd;
125
126         /* Get the next bd */
127         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
128
129         /* Skip a parse bd... */
130         --nbd;
131         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
132
133         /* ...and the TSO split header bd since they have no mapping */
134         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
135                 --nbd;
136                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137         }
138
139         /* now free frags */
140         while (nbd > 0) {
141
142                 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
145                 if (--nbd)
146                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
147         }
148
149         /* release skb */
150         WARN_ON(!skb);
151         if (likely(skb)) {
152                 (*pkts_compl)++;
153                 (*bytes_compl) += skb->len;
154         }
155
156         dev_kfree_skb_any(skb);
157         tx_buf->first_bd = 0;
158         tx_buf->skb = NULL;
159
160         return new_cons;
161 }
162
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
164 {
165         struct netdev_queue *txq;
166         u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167         unsigned int pkts_compl = 0, bytes_compl = 0;
168
169 #ifdef BNX2X_STOP_ON_ERROR
170         if (unlikely(bp->panic))
171                 return -1;
172 #endif
173
174         txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175         hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176         sw_cons = txdata->tx_pkt_cons;
177
178         while (sw_cons != hw_cons) {
179                 u16 pkt_cons;
180
181                 pkt_cons = TX_BD(sw_cons);
182
183                 DP(NETIF_MSG_TX_DONE,
184                    "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
185                    txdata->txq_index, hw_cons, sw_cons, pkt_cons);
186
187                 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188                     &pkts_compl, &bytes_compl);
189
190                 sw_cons++;
191         }
192
193         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
194
195         txdata->tx_pkt_cons = sw_cons;
196         txdata->tx_bd_cons = bd_cons;
197
198         /* Need to make the tx_bd_cons update visible to start_xmit()
199          * before checking for netif_tx_queue_stopped().  Without the
200          * memory barrier, there is a small possibility that
201          * start_xmit() will miss it and cause the queue to be stopped
202          * forever.
203          * On the other hand we need an rmb() here to ensure the proper
204          * ordering of bit testing in the following
205          * netif_tx_queue_stopped(txq) call.
206          */
207         smp_mb();
208
209         if (unlikely(netif_tx_queue_stopped(txq))) {
210                 /* Taking tx_lock() is needed to prevent reenabling the queue
211                  * while it's empty. This could have happened if rx_action() gets
212                  * suspended in bnx2x_tx_int() after the condition before
213                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
214                  *
215                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
216                  * sends some packets consuming the whole queue again->
217                  * stops the queue
218                  */
219
220                 __netif_tx_lock(txq, smp_processor_id());
221
222                 if ((netif_tx_queue_stopped(txq)) &&
223                     (bp->state == BNX2X_STATE_OPEN) &&
224                     (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225                         netif_tx_wake_queue(txq);
226
227                 __netif_tx_unlock(txq);
228         }
229         return 0;
230 }
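/* The smp_mb() in bnx2x_tx_int() above pairs with a barrier on the
 * transmit side.  As a rough, illustrative sketch (not the exact
 * bnx2x_start_xmit() code), the producer side is expected to do
 * something of this shape:
 *
 *	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * Without the paired barriers the completion path could update
 * tx_bd_cons after the producer checked availability but before the
 * queue was marked stopped, leaving the queue stopped with no further
 * completion left to wake it.
 */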
231
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
233                                              u16 idx)
234 {
235         u16 last_max = fp->last_max_sge;
236
237         if (SUB_S16(idx, last_max) > 0)
238                 fp->last_max_sge = idx;
239 }
240
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
242                                          u16 sge_len,
243                                          struct eth_end_agg_rx_cqe *cqe)
244 {
245         struct bnx2x *bp = fp->bp;
246         u16 last_max, last_elem, first_elem;
247         u16 delta = 0;
248         u16 i;
249
250         if (!sge_len)
251                 return;
252
253         /* First mark all used pages */
254         for (i = 0; i < sge_len; i++)
255                 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256                         RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
257
258         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259            sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
260
261         /* Here we assume that the last SGE index is the biggest */
262         prefetch((void *)(fp->sge_mask));
263         bnx2x_update_last_max_sge(fp,
264                 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
265
266         last_max = RX_SGE(fp->last_max_sge);
267         last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268         first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
269
270         /* If ring is not full */
271         if (last_elem + 1 != first_elem)
272                 last_elem++;
273
274         /* Now update the prod */
275         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276                 if (likely(fp->sge_mask[i]))
277                         break;
278
279                 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280                 delta += BIT_VEC64_ELEM_SZ;
281         }
282
283         if (delta > 0) {
284                 fp->rx_sge_prod += delta;
285                 /* clear page-end entries */
286                 bnx2x_clear_sge_mask_next_elems(fp);
287         }
288
289         DP(NETIF_MSG_RX_STATUS,
290            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
291            fp->last_max_sge, fp->rx_sge_prod);
292 }
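/* Illustrative walk-through of the producer update above, assuming
 * BIT_VEC64_ELEM_SZ == 64: each sge_mask[] element tracks 64 SGEs and
 * a set bit means the corresponding SGE has not been consumed yet.
 * If, after the first loop has cleared the bits of the SGEs used by
 * this aggregation, elements 3 and 4 end up fully cleared while
 * element 5 still has bits set, the second loop re-arms elements 3
 * and 4 (back to all ones), accumulates delta = 2 * 64 = 128 and
 * stops at element 5, so rx_sge_prod advances by 128.
 */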
293
294 /* Set Toeplitz hash value in the skb using the value from the
295  * CQE (calculated by HW).
296  */
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298                             const struct eth_fast_path_rx_cqe *cqe,
299                             bool *l4_rxhash)
300 {
301         /* Set Toeplitz hash from CQE */
302         if ((bp->dev->features & NETIF_F_RXHASH) &&
303             (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304                 enum eth_rss_hash_type htype;
305
306                 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307                 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308                              (htype == TCP_IPV6_HASH_TYPE);
309                 return le32_to_cpu(cqe->rss_hash_result);
310         }
311         *l4_rxhash = false;
312         return 0;
313 }
314
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
316                             u16 cons, u16 prod,
317                             struct eth_fast_path_rx_cqe *cqe)
318 {
319         struct bnx2x *bp = fp->bp;
320         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
323         dma_addr_t mapping;
324         struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325         struct sw_rx_bd *first_buf = &tpa_info->first_buf;
326
327         /* print error if current state != stop */
328         if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
330
331         /* Try to map an empty data buffer from the aggregation info  */
332         mapping = dma_map_single(&bp->pdev->dev,
333                                  first_buf->data + NET_SKB_PAD,
334                                  fp->rx_buf_size, DMA_FROM_DEVICE);
335         /*
336          *  ...if it fails - move the skb from the consumer to the producer
337          *  and set the current aggregation state as ERROR to drop it
338          *  when TPA_STOP arrives.
339          */
340
341         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342                 /* Move the BD from the consumer to the producer */
343                 bnx2x_reuse_rx_data(fp, cons, prod);
344                 tpa_info->tpa_state = BNX2X_TPA_ERROR;
345                 return;
346         }
347
348         /* move empty data from pool to prod */
349         prod_rx_buf->data = first_buf->data;
350         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351         /* point prod_bd to new data */
352         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
354
355         /* move partial skb from cons to pool (don't unmap yet) */
356         *first_buf = *cons_rx_buf;
357
358         /* mark bin state as START */
359         tpa_info->parsing_flags =
360                 le16_to_cpu(cqe->pars_flags.flags);
361         tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362         tpa_info->tpa_state = BNX2X_TPA_START;
363         tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364         tpa_info->placement_offset = cqe->placement_offset;
365         tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366         if (fp->mode == TPA_MODE_GRO) {
367                 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368                 tpa_info->full_page =
369                         SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370                 tpa_info->gro_size = gro_size;
371         }
372
373 #ifdef BNX2X_STOP_ON_ERROR
374         fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
377 #else
378         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
379 #endif
380            fp->tpa_queue_used);
381 #endif
382 }
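/* Informal summary of the per-queue TPA state machine implied by
 * bnx2x_tpa_start() above and bnx2x_tpa_stop() below:
 *
 *	BNX2X_TPA_STOP  -- TPA_START CQE, buffer mapped ok -> BNX2X_TPA_START
 *	BNX2X_TPA_STOP  -- TPA_START CQE, dma map failed --> BNX2X_TPA_ERROR
 *	BNX2X_TPA_START -- TPA_END CQE --------------------> BNX2X_TPA_STOP
 *	BNX2X_TPA_ERROR -- TPA_END CQE, aggregation dropped -> BNX2X_TPA_STOP
 */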
383
384 /* Timestamp option length allowed for TPA aggregation:
385  *
386  *              nop nop kind length echo val
387  */
388 #define TPA_TSTAMP_OPT_LEN      12
389 /**
390  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
391  *
392  * @bp:                 driver handle
393  * @parsing_flags:      parsing flags from the START CQE
394  * @len_on_bd:          total length of the first packet for the
395  *                      aggregation.
396  *
397  * Approximate value of the MSS for this aggregation calculated using
398  * the first packet of it.
399  */
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
401                              u16 len_on_bd)
402 {
403         /*
404          * A TPA aggregation won't have IP options, TCP options other than
405          * a timestamp, or IPv6 extension headers.
406          */
407         u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
408
409         if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410             PRS_FLAG_OVERETH_IPV6)
411                 hdrs_len += sizeof(struct ipv6hdr);
412         else /* IPv4 */
413                 hdrs_len += sizeof(struct iphdr);
414
415
416         /* Check if there was a TCP timestamp; if there was, it will
417          * always be 12 bytes long: nop nop kind length echo val.
418          *
419          * Otherwise FW would close the aggregation.
420          */
421         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422                 hdrs_len += TPA_TSTAMP_OPT_LEN;
423
424         return len_on_bd - hdrs_len;
425 }
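/* Worked example: for an IPv4 aggregation whose first packet carries
 * a TCP timestamp option, the header length above works out to
 *
 *	hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 *		   sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12)
 *		 = 66 bytes
 *
 * so a len_on_bd of 1514 bytes yields an approximate MSS of
 * 1514 - 66 = 1448 bytes.
 */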
426
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428                               struct bnx2x_fastpath *fp, u16 index)
429 {
430         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433         dma_addr_t mapping;
434
435         if (unlikely(page == NULL)) {
436                 BNX2X_ERR("Can't alloc sge\n");
437                 return -ENOMEM;
438         }
439
440         mapping = dma_map_page(&bp->pdev->dev, page, 0,
441                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443                 __free_pages(page, PAGES_PER_SGE_SHIFT);
444                 BNX2X_ERR("Can't map sge\n");
445                 return -ENOMEM;
446         }
447
448         sw_buf->page = page;
449         dma_unmap_addr_set(sw_buf, mapping, mapping);
450
451         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
453
454         return 0;
455 }
456
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458                                struct bnx2x_agg_info *tpa_info,
459                                u16 pages,
460                                struct sk_buff *skb,
461                                struct eth_end_agg_rx_cqe *cqe,
462                                u16 cqe_idx)
463 {
464         struct sw_rx_page *rx_pg, old_rx_pg;
465         u32 i, frag_len, frag_size;
466         int err, j, frag_id = 0;
467         u16 len_on_bd = tpa_info->len_on_bd;
468         u16 full_page = 0, gro_size = 0;
469
470         frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
471
472         if (fp->mode == TPA_MODE_GRO) {
473                 gro_size = tpa_info->gro_size;
474                 full_page = tpa_info->full_page;
475         }
476
477         /* This is needed in order to enable forwarding support */
478         if (frag_size) {
479                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480                                         tpa_info->parsing_flags, len_on_bd);
481
482                 /* set for GRO */
483                 if (fp->mode == TPA_MODE_GRO)
484                         skb_shinfo(skb)->gso_type =
485                             (GET_FLAG(tpa_info->parsing_flags,
486                                       PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487                                                 PRS_FLAG_OVERETH_IPV6) ?
488                                 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
489         }
490
491
492 #ifdef BNX2X_STOP_ON_ERROR
493         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
495                           pages, cqe_idx);
496                 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
497                 bnx2x_panic();
498                 return -EINVAL;
499         }
500 #endif
501
502         /* Run through the SGL and compose the fragmented skb */
503         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504                 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
505
506                 /* FW gives the indices of the SGE as if the ring is an array
507                    (meaning that "next" element will consume 2 indices) */
508                 if (fp->mode == TPA_MODE_GRO)
509                         frag_len = min_t(u32, frag_size, (u32)full_page);
510                 else /* LRO */
511                         frag_len = min_t(u32, frag_size,
512                                          (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
513
514                 rx_pg = &fp->rx_page_ring[sge_idx];
515                 old_rx_pg = *rx_pg;
516
517                 /* If we fail to allocate a substitute page, we simply stop
518                    where we are and drop the whole packet */
519                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
520                 if (unlikely(err)) {
521                         bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
522                         return err;
523                 }
524
525                 /* Unmap the page as we are going to pass it to the stack */
526                 dma_unmap_page(&bp->pdev->dev,
527                                dma_unmap_addr(&old_rx_pg, mapping),
528                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529                 /* Add one frag and update the appropriate fields in the skb */
530                 if (fp->mode == TPA_MODE_LRO)
531                         skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
532                 else { /* GRO */
533                         int rem;
534                         int offset = 0;
535                         for (rem = frag_len; rem > 0; rem -= gro_size) {
536                                 int len = rem > gro_size ? gro_size : rem;
537                                 skb_fill_page_desc(skb, frag_id++,
538                                                    old_rx_pg.page, offset, len);
539                                 if (offset)
540                                         get_page(old_rx_pg.page);
541                                 offset += len;
542                         }
543                 }
544
545                 skb->data_len += frag_len;
546                 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547                 skb->len += frag_len;
548
549                 frag_size -= frag_len;
550         }
551
552         return 0;
553 }
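/* Illustrative example of the GRO split in the loop above
 * (hypothetical values): if one SGE iteration ends up with
 * frag_len = 4096 and gro_size = 1448, the inner loop adds three
 * frags of 1448, 1448 and 1200 bytes, all pointing into the same page
 * at offsets 0, 1448 and 2896.  Each frag after the first takes an
 * extra reference via get_page(), since every skb frag drops its own
 * page reference when the skb is freed and only one reference was
 * inherited from the rx_page_ring entry.
 */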
554
555 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
556 {
557         if (fp->rx_frag_size)
558                 put_page(virt_to_head_page(data));
559         else
560                 kfree(data);
561 }
562
563 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
564 {
565         if (fp->rx_frag_size)
566                 return netdev_alloc_frag(fp->rx_frag_size);
567
568         return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
569 }
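/* A note on the two helpers above (partly an assumption, since the
 * rx_frag_size sizing code lives outside this function): when
 * fp->rx_frag_size is non-zero the rx buffer is a page fragment from
 * netdev_alloc_frag(), presumably sized so that build_skb() can place
 * its struct skb_shared_info at the tail; when it is zero a plain
 * kmalloc() buffer is used and build_skb() later receives a frag size
 * of 0, the convention for kmalloc()'ed heads.  Either way the rx
 * path in this file sees roughly this layout:
 *
 *	data
 *	 |
 *	 v
 *	[ NET_SKB_PAD | placement pad | packet bytes ... | shared info ]
 *
 * which is why bnx2x_alloc_rx_data() maps data + NET_SKB_PAD for
 * fp->rx_buf_size bytes and the skb is later trimmed with
 * skb_reserve(skb, pad + NET_SKB_PAD).
 */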
570
571
572 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
573                            struct bnx2x_agg_info *tpa_info,
574                            u16 pages,
575                            struct eth_end_agg_rx_cqe *cqe,
576                            u16 cqe_idx)
577 {
578         struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
579         u8 pad = tpa_info->placement_offset;
580         u16 len = tpa_info->len_on_bd;
581         struct sk_buff *skb = NULL;
582         u8 *new_data, *data = rx_buf->data;
583         u8 old_tpa_state = tpa_info->tpa_state;
584
585         tpa_info->tpa_state = BNX2X_TPA_STOP;
586
587         /* If there was an error during the handling of the TPA_START -
588          * drop this aggregation.
589          */
590         if (old_tpa_state == BNX2X_TPA_ERROR)
591                 goto drop;
592
593         /* Try to allocate the new data */
594         new_data = bnx2x_frag_alloc(fp);
595         /* Unmap skb in the pool anyway, as we are going to change
596            pool entry status to BNX2X_TPA_STOP even if new skb allocation
597            fails. */
598         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
599                          fp->rx_buf_size, DMA_FROM_DEVICE);
600         if (likely(new_data))
601                 skb = build_skb(data, fp->rx_frag_size);
602
603         if (likely(skb)) {
604 #ifdef BNX2X_STOP_ON_ERROR
605                 if (pad + len > fp->rx_buf_size) {
606                         BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
607                                   pad, len, fp->rx_buf_size);
608                         bnx2x_panic();
609                         return;
610                 }
611 #endif
612
613                 skb_reserve(skb, pad + NET_SKB_PAD);
614                 skb_put(skb, len);
615                 skb->rxhash = tpa_info->rxhash;
616                 skb->l4_rxhash = tpa_info->l4_rxhash;
617
618                 skb->protocol = eth_type_trans(skb, bp->dev);
619                 skb->ip_summed = CHECKSUM_UNNECESSARY;
620
621                 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
622                                          skb, cqe, cqe_idx)) {
623                         if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
624                                 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
625                         napi_gro_receive(&fp->napi, skb);
626                 } else {
627                         DP(NETIF_MSG_RX_STATUS,
628                            "Failed to allocate new pages - dropping packet!\n");
629                         dev_kfree_skb_any(skb);
630                 }
631
632
633                 /* put new data in bin */
634                 rx_buf->data = new_data;
635
636                 return;
637         }
638         bnx2x_frag_free(fp, new_data);
639 drop:
640         /* drop the packet and keep the buffer in the bin */
641         DP(NETIF_MSG_RX_STATUS,
642            "Failed to allocate or map a new skb - dropping packet!\n");
643         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
644 }
645
646 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
647                                struct bnx2x_fastpath *fp, u16 index)
648 {
649         u8 *data;
650         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
651         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
652         dma_addr_t mapping;
653
654         data = bnx2x_frag_alloc(fp);
655         if (unlikely(data == NULL))
656                 return -ENOMEM;
657
658         mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
659                                  fp->rx_buf_size,
660                                  DMA_FROM_DEVICE);
661         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
662                 bnx2x_frag_free(fp, data);
663                 BNX2X_ERR("Can't map rx data\n");
664                 return -ENOMEM;
665         }
666
667         rx_buf->data = data;
668         dma_unmap_addr_set(rx_buf, mapping, mapping);
669
670         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
671         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
672
673         return 0;
674 }
675
676 static
677 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
678                                  struct bnx2x_fastpath *fp,
679                                  struct bnx2x_eth_q_stats *qstats)
680 {
681         /* Do nothing if no L4 csum validation was done.
682          * We do not check whether IP csum was validated. For IPv4 we assume
683          * that if the card got as far as validating the L4 csum, it also
684          * validated the IP csum. IPv6 has no IP csum.
685          */
686         if (cqe->fast_path_cqe.status_flags &
687             ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
688                 return;
689
690         /* If L4 validation was done, check if an error was found. */
691
692         if (cqe->fast_path_cqe.type_error_flags &
693             (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
694              ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
695                 qstats->hw_csum_err++;
696         else
697                 skb->ip_summed = CHECKSUM_UNNECESSARY;
698 }
699
700 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
701 {
702         struct bnx2x *bp = fp->bp;
703         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
704         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
705         int rx_pkt = 0;
706
707 #ifdef BNX2X_STOP_ON_ERROR
708         if (unlikely(bp->panic))
709                 return 0;
710 #endif
711
712         /* CQ "next element" is of the size of the regular element,
713            that's why it's ok here */
714         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
715         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
716                 hw_comp_cons++;
717
718         bd_cons = fp->rx_bd_cons;
719         bd_prod = fp->rx_bd_prod;
720         bd_prod_fw = bd_prod;
721         sw_comp_cons = fp->rx_comp_cons;
722         sw_comp_prod = fp->rx_comp_prod;
723
724         /* Memory barrier necessary as speculative reads of the rx
725          * buffer can be ahead of the index in the status block
726          */
727         rmb();
728
729         DP(NETIF_MSG_RX_STATUS,
730            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
731            fp->index, hw_comp_cons, sw_comp_cons);
732
733         while (sw_comp_cons != hw_comp_cons) {
734                 struct sw_rx_bd *rx_buf = NULL;
735                 struct sk_buff *skb;
736                 union eth_rx_cqe *cqe;
737                 struct eth_fast_path_rx_cqe *cqe_fp;
738                 u8 cqe_fp_flags;
739                 enum eth_rx_cqe_type cqe_fp_type;
740                 u16 len, pad, queue;
741                 u8 *data;
742                 bool l4_rxhash;
743
744 #ifdef BNX2X_STOP_ON_ERROR
745                 if (unlikely(bp->panic))
746                         return 0;
747 #endif
748
749                 comp_ring_cons = RCQ_BD(sw_comp_cons);
750                 bd_prod = RX_BD(bd_prod);
751                 bd_cons = RX_BD(bd_cons);
752
753                 cqe = &fp->rx_comp_ring[comp_ring_cons];
754                 cqe_fp = &cqe->fast_path_cqe;
755                 cqe_fp_flags = cqe_fp->type_error_flags;
756                 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
757
758                 DP(NETIF_MSG_RX_STATUS,
759                    "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
760                    CQE_TYPE(cqe_fp_flags),
761                    cqe_fp_flags, cqe_fp->status_flags,
762                    le32_to_cpu(cqe_fp->rss_hash_result),
763                    le16_to_cpu(cqe_fp->vlan_tag),
764                    le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
765
766                 /* is this a slowpath msg? */
767                 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
768                         bnx2x_sp_event(fp, cqe);
769                         goto next_cqe;
770                 }
771
772                 rx_buf = &fp->rx_buf_ring[bd_cons];
773                 data = rx_buf->data;
774
775                 if (!CQE_TYPE_FAST(cqe_fp_type)) {
776                         struct bnx2x_agg_info *tpa_info;
777                         u16 frag_size, pages;
778 #ifdef BNX2X_STOP_ON_ERROR
779                         /* sanity check */
780                         if (fp->disable_tpa &&
781                             (CQE_TYPE_START(cqe_fp_type) ||
782                              CQE_TYPE_STOP(cqe_fp_type)))
783                                 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
784                                           CQE_TYPE(cqe_fp_type));
785 #endif
786
787                         if (CQE_TYPE_START(cqe_fp_type)) {
788                                 u16 queue = cqe_fp->queue_index;
789                                 DP(NETIF_MSG_RX_STATUS,
790                                    "calling tpa_start on queue %d\n",
791                                    queue);
792
793                                 bnx2x_tpa_start(fp, queue,
794                                                 bd_cons, bd_prod,
795                                                 cqe_fp);
796
797                                 goto next_rx;
798
799                         }
800                         queue = cqe->end_agg_cqe.queue_index;
801                         tpa_info = &fp->tpa_info[queue];
802                         DP(NETIF_MSG_RX_STATUS,
803                            "calling tpa_stop on queue %d\n",
804                            queue);
805
806                         frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
807                                     tpa_info->len_on_bd;
808
809                         if (fp->mode == TPA_MODE_GRO)
810                                 pages = (frag_size + tpa_info->full_page - 1) /
811                                          tpa_info->full_page;
812                         else
813                                 pages = SGE_PAGE_ALIGN(frag_size) >>
814                                         SGE_PAGE_SHIFT;
815
816                         bnx2x_tpa_stop(bp, fp, tpa_info, pages,
817                                        &cqe->end_agg_cqe, comp_ring_cons);
818 #ifdef BNX2X_STOP_ON_ERROR
819                         if (bp->panic)
820                                 return 0;
821 #endif
822
823                         bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
824                         goto next_cqe;
825                 }
826                 /* non TPA */
827                 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
828                 pad = cqe_fp->placement_offset;
829                 dma_sync_single_for_cpu(&bp->pdev->dev,
830                                         dma_unmap_addr(rx_buf, mapping),
831                                         pad + RX_COPY_THRESH,
832                                         DMA_FROM_DEVICE);
833                 pad += NET_SKB_PAD;
834                 prefetch(data + pad); /* speed up eth_type_trans() */
835                 /* is this an error packet? */
836                 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
837                         DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
838                            "ERROR  flags %x  rx packet %u\n",
839                            cqe_fp_flags, sw_comp_cons);
840                         bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
841                         goto reuse_rx;
842                 }
843
844                 /* Since we don't have a jumbo ring
845                  * copy small packets if mtu > 1500
846                  */
847                 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
848                     (len <= RX_COPY_THRESH)) {
849                         skb = netdev_alloc_skb_ip_align(bp->dev, len);
850                         if (skb == NULL) {
851                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
852                                    "ERROR  packet dropped because of alloc failure\n");
853                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
854                                 goto reuse_rx;
855                         }
856                         memcpy(skb->data, data + pad, len);
857                         bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
858                 } else {
859                         if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
860                                 dma_unmap_single(&bp->pdev->dev,
861                                                  dma_unmap_addr(rx_buf, mapping),
862                                                  fp->rx_buf_size,
863                                                  DMA_FROM_DEVICE);
864                                 skb = build_skb(data, fp->rx_frag_size);
865                                 if (unlikely(!skb)) {
866                                         bnx2x_frag_free(fp, data);
867                                         bnx2x_fp_qstats(bp, fp)->
868                                                         rx_skb_alloc_failed++;
869                                         goto next_rx;
870                                 }
871                                 skb_reserve(skb, pad);
872                         } else {
873                                 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
874                                    "ERROR  packet dropped because of alloc failure\n");
875                                 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
876 reuse_rx:
877                                 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
878                                 goto next_rx;
879                         }
880                 }
881
882                 skb_put(skb, len);
883                 skb->protocol = eth_type_trans(skb, bp->dev);
884
885                 /* Set Toeplitz hash for a non-LRO skb */
886                 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
887                 skb->l4_rxhash = l4_rxhash;
888
889                 skb_checksum_none_assert(skb);
890
891                 if (bp->dev->features & NETIF_F_RXCSUM)
892                         bnx2x_csum_validate(skb, cqe, fp,
893                                             bnx2x_fp_qstats(bp, fp));
894
895                 skb_record_rx_queue(skb, fp->rx_queue);
896
897                 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
898                     PARSING_FLAGS_VLAN)
899                         __vlan_hwaccel_put_tag(skb,
900                                                le16_to_cpu(cqe_fp->vlan_tag));
901                 napi_gro_receive(&fp->napi, skb);
902
903
904 next_rx:
905                 rx_buf->data = NULL;
906
907                 bd_cons = NEXT_RX_IDX(bd_cons);
908                 bd_prod = NEXT_RX_IDX(bd_prod);
909                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
910                 rx_pkt++;
911 next_cqe:
912                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
913                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
914
915                 if (rx_pkt == budget)
916                         break;
917         } /* while */
918
919         fp->rx_bd_cons = bd_cons;
920         fp->rx_bd_prod = bd_prod_fw;
921         fp->rx_comp_cons = sw_comp_cons;
922         fp->rx_comp_prod = sw_comp_prod;
923
924         /* Update producers */
925         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
926                              fp->rx_sge_prod);
927
928         fp->rx_pkt += rx_pkt;
929         fp->rx_calls++;
930
931         return rx_pkt;
932 }
933
934 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
935 {
936         struct bnx2x_fastpath *fp = fp_cookie;
937         struct bnx2x *bp = fp->bp;
938         u8 cos;
939
940         DP(NETIF_MSG_INTR,
941            "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
942            fp->index, fp->fw_sb_id, fp->igu_sb_id);
943         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
944
945 #ifdef BNX2X_STOP_ON_ERROR
946         if (unlikely(bp->panic))
947                 return IRQ_HANDLED;
948 #endif
949
950         /* Handle Rx and Tx according to MSI-X vector */
951         prefetch(fp->rx_cons_sb);
952
953         for_each_cos_in_tx_queue(fp, cos)
954                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
955
956         prefetch(&fp->sb_running_index[SM_RX_ID]);
957         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
958
959         return IRQ_HANDLED;
960 }
961
962 /* HW Lock for shared dual port PHYs */
963 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
964 {
965         mutex_lock(&bp->port.phy_mutex);
966
967         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
968 }
969
970 void bnx2x_release_phy_lock(struct bnx2x *bp)
971 {
972         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
973
974         mutex_unlock(&bp->port.phy_mutex);
975 }
976
977 /* calculates MF speed according to current linespeed and MF configuration */
978 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
979 {
980         u16 line_speed = bp->link_vars.line_speed;
981         if (IS_MF(bp)) {
982                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
983                                                    bp->mf_config[BP_VN(bp)]);
984
985                 /* Calculate the current MAX line speed limit for the MF
986                  * devices
987                  */
988                 if (IS_MF_SI(bp))
989                         line_speed = (line_speed * maxCfg) / 100;
990                 else { /* SD mode */
991                         u16 vn_max_rate = maxCfg * 100;
992
993                         if (vn_max_rate < line_speed)
994                                 line_speed = vn_max_rate;
995                 }
996         }
997
998         return line_speed;
999 }
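/* Worked example for the calculation above (hypothetical values; the
 * exact unit of maxCfg comes from bnx2x_extract_max_cfg() and is not
 * shown here): with a 10000 Mbps link and maxCfg = 50, SI mode
 * reports 10000 * 50 / 100 = 5000 Mbps, while SD mode caps the speed
 * at vn_max_rate = 50 * 100 = 5000 Mbps.
 */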
1000
1001 /**
1002  * bnx2x_fill_report_data - fill link report data to report
1003  *
1004  * @bp:         driver handle
1005  * @data:       link state to update
1006  *
1007  * It uses non-atomic bit operations because it is called under the mutex.
1008  */
1009 static void bnx2x_fill_report_data(struct bnx2x *bp,
1010                                    struct bnx2x_link_report_data *data)
1011 {
1012         u16 line_speed = bnx2x_get_mf_speed(bp);
1013
1014         memset(data, 0, sizeof(*data));
1015
1016         /* Fill the report data: effective line speed */
1017         data->line_speed = line_speed;
1018
1019         /* Link is down */
1020         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1021                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1022                           &data->link_report_flags);
1023
1024         /* Full DUPLEX */
1025         if (bp->link_vars.duplex == DUPLEX_FULL)
1026                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1027
1028         /* Rx Flow Control is ON */
1029         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1030                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1031
1032         /* Tx Flow Control is ON */
1033         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1034                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1035 }
1036
1037 /**
1038  * bnx2x_link_report - report link status to OS.
1039  *
1040  * @bp:         driver handle
1041  *
1042  * Calls the __bnx2x_link_report() under the same locking scheme
1043  * as the link/PHY state management code to ensure consistent link
1044  * reporting.
1045  */
1046
1047 void bnx2x_link_report(struct bnx2x *bp)
1048 {
1049         bnx2x_acquire_phy_lock(bp);
1050         __bnx2x_link_report(bp);
1051         bnx2x_release_phy_lock(bp);
1052 }
1053
1054 /**
1055  * __bnx2x_link_report - report link status to OS.
1056  *
1057  * @bp:         driver handle
1058  *
1059  * Non-atomic implementation.
1060  * Should be called under the phy_lock.
1061  */
1062 void __bnx2x_link_report(struct bnx2x *bp)
1063 {
1064         struct bnx2x_link_report_data cur_data;
1065
1066         /* reread mf_cfg */
1067         if (!CHIP_IS_E1(bp))
1068                 bnx2x_read_mf_cfg(bp);
1069
1070         /* Read the current link report info */
1071         bnx2x_fill_report_data(bp, &cur_data);
1072
1073         /* Don't report link down or exactly the same link status twice */
1074         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1075             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1076                       &bp->last_reported_link.link_report_flags) &&
1077              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1078                       &cur_data.link_report_flags)))
1079                 return;
1080
1081         bp->link_cnt++;
1082
1083         /* We are going to report new link parameters now -
1084          * remember the current data for the next time.
1085          */
1086         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1087
1088         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1089                      &cur_data.link_report_flags)) {
1090                 netif_carrier_off(bp->dev);
1091                 netdev_err(bp->dev, "NIC Link is Down\n");
1092                 return;
1093         } else {
1094                 const char *duplex;
1095                 const char *flow;
1096
1097                 netif_carrier_on(bp->dev);
1098
1099                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1100                                        &cur_data.link_report_flags))
1101                         duplex = "full";
1102                 else
1103                         duplex = "half";
1104
1105                 /* Handle the FC flags at the end so that only they can still be
1106                  * set at this point. This way we can easily check whether any FC
1107                  * is enabled.
1108                  */
1109                 if (cur_data.link_report_flags) {
1110                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1111                                      &cur_data.link_report_flags)) {
1112                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1113                                      &cur_data.link_report_flags))
1114                                         flow = "ON - receive & transmit";
1115                                 else
1116                                         flow = "ON - receive";
1117                         } else {
1118                                 flow = "ON - transmit";
1119                         }
1120                 } else {
1121                         flow = "none";
1122                 }
1123                 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1124                             cur_data.line_speed, duplex, flow);
1125         }
1126 }
1127
1128 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1129 {
1130         int i;
1131
1132         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1133                 struct eth_rx_sge *sge;
1134
1135                 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1136                 sge->addr_hi =
1137                         cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1138                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1139
1140                 sge->addr_lo =
1141                         cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1142                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1143         }
1144 }
1145
1146 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147                                 struct bnx2x_fastpath *fp, int last)
1148 {
1149         int i;
1150
1151         for (i = 0; i < last; i++) {
1152                 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1153                 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1154                 u8 *data = first_buf->data;
1155
1156                 if (data == NULL) {
1157                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1158                         continue;
1159                 }
1160                 if (tpa_info->tpa_state == BNX2X_TPA_START)
1161                         dma_unmap_single(&bp->pdev->dev,
1162                                          dma_unmap_addr(first_buf, mapping),
1163                                          fp->rx_buf_size, DMA_FROM_DEVICE);
1164                 bnx2x_frag_free(fp, data);
1165                 first_buf->data = NULL;
1166         }
1167 }
1168
1169 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1170 {
1171         int j;
1172
1173         for_each_rx_queue_cnic(bp, j) {
1174                 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176                 fp->rx_bd_cons = 0;
1177
1178                 /* Activate BD ring */
1179                 /* Warning!
1180                  * this will generate an interrupt (to the TSTORM)
1181                  * must only be done after chip is initialized
1182                  */
1183                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184                                      fp->rx_sge_prod);
1185         }
1186 }
1187
1188 void bnx2x_init_rx_rings(struct bnx2x *bp)
1189 {
1190         int func = BP_FUNC(bp);
1191         u16 ring_prod;
1192         int i, j;
1193
1194         /* Allocate TPA resources */
1195         for_each_eth_queue(bp, j) {
1196                 struct bnx2x_fastpath *fp = &bp->fp[j];
1197
1198                 DP(NETIF_MSG_IFUP,
1199                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1200
1201                 if (!fp->disable_tpa) {
1202                         /* Fill the per-aggregation pool */
1203                         for (i = 0; i < MAX_AGG_QS(bp); i++) {
1204                                 struct bnx2x_agg_info *tpa_info =
1205                                         &fp->tpa_info[i];
1206                                 struct sw_rx_bd *first_buf =
1207                                         &tpa_info->first_buf;
1208
1209                                 first_buf->data = bnx2x_frag_alloc(fp);
1210                                 if (!first_buf->data) {
1211                                         BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1212                                                   j);
1213                                         bnx2x_free_tpa_pool(bp, fp, i);
1214                                         fp->disable_tpa = 1;
1215                                         break;
1216                                 }
1217                                 dma_unmap_addr_set(first_buf, mapping, 0);
1218                                 tpa_info->tpa_state = BNX2X_TPA_STOP;
1219                         }
1220
1221                         /* "next page" elements initialization */
1222                         bnx2x_set_next_page_sgl(fp);
1223
1224                         /* set SGEs bit mask */
1225                         bnx2x_init_sge_ring_bit_mask(fp);
1226
1227                         /* Allocate SGEs and initialize the ring elements */
1228                         for (i = 0, ring_prod = 0;
1229                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1230
1231                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1232                                         BNX2X_ERR("was only able to allocate %d rx sges\n",
1233                                                   i);
1234                                         BNX2X_ERR("disabling TPA for queue[%d]\n",
1235                                                   j);
1236                                         /* Cleanup already allocated elements */
1237                                         bnx2x_free_rx_sge_range(bp, fp,
1238                                                                 ring_prod);
1239                                         bnx2x_free_tpa_pool(bp, fp,
1240                                                             MAX_AGG_QS(bp));
1241                                         fp->disable_tpa = 1;
1242                                         ring_prod = 0;
1243                                         break;
1244                                 }
1245                                 ring_prod = NEXT_SGE_IDX(ring_prod);
1246                         }
1247
1248                         fp->rx_sge_prod = ring_prod;
1249                 }
1250         }
1251
1252         for_each_eth_queue(bp, j) {
1253                 struct bnx2x_fastpath *fp = &bp->fp[j];
1254
1255                 fp->rx_bd_cons = 0;
1256
1257                 /* Activate BD ring */
1258                 /* Warning!
1259                  * this will generate an interrupt (to the TSTORM)
1260                  * must only be done after chip is initialized
1261                  */
1262                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1263                                      fp->rx_sge_prod);
1264
1265                 if (j != 0)
1266                         continue;
1267
1268                 if (CHIP_IS_E1(bp)) {
1269                         REG_WR(bp, BAR_USTRORM_INTMEM +
1270                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1271                                U64_LO(fp->rx_comp_mapping));
1272                         REG_WR(bp, BAR_USTRORM_INTMEM +
1273                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1274                                U64_HI(fp->rx_comp_mapping));
1275                 }
1276         }
1277 }
1278
1279 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1280 {
1281         u8 cos;
1282         struct bnx2x *bp = fp->bp;
1283
1284         for_each_cos_in_tx_queue(fp, cos) {
1285                 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1286                 unsigned pkts_compl = 0, bytes_compl = 0;
1287
1288                 u16 sw_prod = txdata->tx_pkt_prod;
1289                 u16 sw_cons = txdata->tx_pkt_cons;
1290
1291                 while (sw_cons != sw_prod) {
1292                         bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1293                                           &pkts_compl, &bytes_compl);
1294                         sw_cons++;
1295                 }
1296
1297                 netdev_tx_reset_queue(
1298                         netdev_get_tx_queue(bp->dev,
1299                                             txdata->txq_index));
1300         }
1301 }
1302
1303 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1304 {
1305         int i;
1306
1307         for_each_tx_queue_cnic(bp, i) {
1308                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1309         }
1310 }
1311
1312 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1313 {
1314         int i;
1315
1316         for_each_eth_queue(bp, i) {
1317                 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1318         }
1319 }
1320
1321 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1322 {
1323         struct bnx2x *bp = fp->bp;
1324         int i;
1325
1326         /* ring wasn't allocated */
1327         if (fp->rx_buf_ring == NULL)
1328                 return;
1329
1330         for (i = 0; i < NUM_RX_BD; i++) {
1331                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1332                 u8 *data = rx_buf->data;
1333
1334                 if (data == NULL)
1335                         continue;
1336                 dma_unmap_single(&bp->pdev->dev,
1337                                  dma_unmap_addr(rx_buf, mapping),
1338                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1339
1340                 rx_buf->data = NULL;
1341                 bnx2x_frag_free(fp, data);
1342         }
1343 }
1344
1345 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1346 {
1347         int j;
1348
1349         for_each_rx_queue_cnic(bp, j) {
1350                 bnx2x_free_rx_bds(&bp->fp[j]);
1351         }
1352 }
1353
1354 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1355 {
1356         int j;
1357
1358         for_each_eth_queue(bp, j) {
1359                 struct bnx2x_fastpath *fp = &bp->fp[j];
1360
1361                 bnx2x_free_rx_bds(fp);
1362
1363                 if (!fp->disable_tpa)
1364                         bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1365         }
1366 }
1367
1368 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1369 {
1370         bnx2x_free_tx_skbs_cnic(bp);
1371         bnx2x_free_rx_skbs_cnic(bp);
1372 }
1373
1374 void bnx2x_free_skbs(struct bnx2x *bp)
1375 {
1376         bnx2x_free_tx_skbs(bp);
1377         bnx2x_free_rx_skbs(bp);
1378 }
1379
1380 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1381 {
1382         /* load old values */
1383         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1384
1385         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1386                 /* leave all but MAX value */
1387                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1388
1389                 /* set new MAX value */
1390                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1391                                 & FUNC_MF_CFG_MAX_BW_MASK;
1392
1393                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1394         }
1395 }
1396
1397 /**
1398  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1399  *
1400  * @bp:         driver handle
1401  * @nvecs:      number of vectors to be released
1402  */
1403 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1404 {
1405         int i, offset = 0;
1406
1407         if (nvecs == offset)
1408                 return;
1409         free_irq(bp->msix_table[offset].vector, bp->dev);
1410         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1411            bp->msix_table[offset].vector);
1412         offset++;
1413
1414         if (CNIC_SUPPORT(bp)) {
1415                 if (nvecs == offset)
1416                         return;
1417                 offset++;
1418         }
1419
1420         for_each_eth_queue(bp, i) {
1421                 if (nvecs == offset)
1422                         return;
1423                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1424                    i, bp->msix_table[offset].vector);
1425
1426                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1427         }
1428 }
1429
1430 void bnx2x_free_irq(struct bnx2x *bp)
1431 {
1432         if (bp->flags & USING_MSIX_FLAG &&
1433             !(bp->flags & USING_SINGLE_MSIX_FLAG))
1434                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1435                                      CNIC_SUPPORT(bp) + 1);
1436         else
1437                 free_irq(bp->dev->irq, bp->dev);
1438 }
1439
1440 int bnx2x_enable_msix(struct bnx2x *bp)
1441 {
1442         int msix_vec = 0, i, rc, req_cnt;
1443
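             /* Build the MSI-X table in the same order it is released in
              * bnx2x_free_msix_irqs(): slowpath first, then an optional CNIC
              * vector, then one vector per ETH queue.  If the full request
              * cannot be granted, retry with however many vectors the PCI
              * layer reports as available, then with a single vector, and
              * finally let the caller fall back to MSI/INTx.
              */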
1444         bp->msix_table[msix_vec].entry = msix_vec;
1445         BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1446            bp->msix_table[0].entry);
1447         msix_vec++;
1448
1449         /* CNIC requires an MSI-X vector for itself */
1450         if (CNIC_SUPPORT(bp)) {
1451                 bp->msix_table[msix_vec].entry = msix_vec;
1452                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1453                                msix_vec, bp->msix_table[msix_vec].entry);
1454                 msix_vec++;
1455         }
1456
1457         /* We need separate vectors for ETH queues only (not FCoE) */
1458         for_each_eth_queue(bp, i) {
1459                 bp->msix_table[msix_vec].entry = msix_vec;
1460                 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1461                                msix_vec, msix_vec, i);
1462                 msix_vec++;
1463         }
1464
1465         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1466
1467         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1468
1469         /*
1470          * reconfigure number of tx/rx queues according to available
1471          * MSI-X vectors
1472          */
1473         if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1474                 /* how many fewer vectors will we have? */
1475                 int diff = req_cnt - rc;
1476
1477                 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1478
1479                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1480
1481                 if (rc) {
1482                         BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1483                         goto no_msix;
1484                 }
1485                 /*
1486                  * decrease number of queues by number of unallocated entries
1487                  */
1488                 bp->num_ethernet_queues -= diff;
1489                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1490
1491                 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1492                                bp->num_queues);
1493         } else if (rc > 0) {
1494                 /* Get by with single vector */
1495                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1496                 if (rc) {
1497                         BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1498                                        rc);
1499                         goto no_msix;
1500                 }
1501
1502                 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1503                 bp->flags |= USING_SINGLE_MSIX_FLAG;
1504
1505                 BNX2X_DEV_INFO("set number of queues to 1\n");
1506                 bp->num_ethernet_queues = 1;
1507                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1508         } else if (rc < 0) {
1509                 BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
1510                 goto no_msix;
1511         }
1512
1513         bp->flags |= USING_MSIX_FLAG;
1514
1515         return 0;
1516
1517 no_msix:
1518         /* fall back to INTx if not enough memory */
1519         if (rc == -ENOMEM)
1520                 bp->flags |= DISABLE_MSI_FLAG;
1521
1522         return rc;
1523 }
1524
1525 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1526 {
1527         int i, rc, offset = 0;
1528
1529         rc = request_irq(bp->msix_table[offset++].vector,
1530                          bnx2x_msix_sp_int, 0,
1531                          bp->dev->name, bp->dev);
1532         if (rc) {
1533                 BNX2X_ERR("request sp irq failed\n");
1534                 return -EBUSY;
1535         }
1536
1537         if (CNIC_SUPPORT(bp))
1538                 offset++;
1539
1540         for_each_eth_queue(bp, i) {
1541                 struct bnx2x_fastpath *fp = &bp->fp[i];
1542                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1543                          bp->dev->name, i);
1544
1545                 rc = request_irq(bp->msix_table[offset].vector,
1546                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1547                 if (rc) {
1548                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1549                               bp->msix_table[offset].vector, rc);
1550                         bnx2x_free_msix_irqs(bp, offset);
1551                         return -EBUSY;
1552                 }
1553
1554                 offset++;
1555         }
1556
1557         i = BNX2X_NUM_ETH_QUEUES(bp);
1558         offset = 1 + CNIC_SUPPORT(bp);
1559         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1560                bp->msix_table[0].vector,
1561                0, bp->msix_table[offset].vector,
1562                i - 1, bp->msix_table[offset + i - 1].vector);
1563
1564         return 0;
1565 }
1566
1567 int bnx2x_enable_msi(struct bnx2x *bp)
1568 {
1569         int rc;
1570
1571         rc = pci_enable_msi(bp->pdev);
1572         if (rc) {
1573                 BNX2X_DEV_INFO("MSI is not attainable\n");
1574                 return -1;
1575         }
1576         bp->flags |= USING_MSI_FLAG;
1577
1578         return 0;
1579 }
1580
1581 static int bnx2x_req_irq(struct bnx2x *bp)
1582 {
1583         unsigned long flags;
1584         unsigned int irq;
1585
1586         if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1587                 flags = 0;
1588         else
1589                 flags = IRQF_SHARED;
1590
1591         if (bp->flags & USING_MSIX_FLAG)
1592                 irq = bp->msix_table[0].vector;
1593         else
1594                 irq = bp->pdev->irq;
1595
1596         return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1597 }
1598
1599 static int bnx2x_setup_irqs(struct bnx2x *bp)
1600 {
1601         int rc = 0;
1602         if (bp->flags & USING_MSIX_FLAG &&
1603             !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1604                 rc = bnx2x_req_msix_irqs(bp);
1605                 if (rc)
1606                         return rc;
1607         } else {
1608                 bnx2x_ack_int(bp);
1609                 rc = bnx2x_req_irq(bp);
1610                 if (rc) {
1611                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1612                         return rc;
1613                 }
1614                 if (bp->flags & USING_MSI_FLAG) {
1615                         bp->dev->irq = bp->pdev->irq;
1616                         netdev_info(bp->dev, "using MSI IRQ %d\n",
1617                                     bp->dev->irq);
1618                 }
1619                 if (bp->flags & USING_MSIX_FLAG) {
1620                         bp->dev->irq = bp->msix_table[0].vector;
1621                         netdev_info(bp->dev, "using MSIX IRQ %d\n",
1622                                     bp->dev->irq);
1623                 }
1624         }
1625
1626         return 0;
1627 }
1628
1629 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1630 {
1631         int i;
1632
1633         for_each_rx_queue_cnic(bp, i)
1634                 napi_enable(&bnx2x_fp(bp, i, napi));
1635 }
1636
1637 static void bnx2x_napi_enable(struct bnx2x *bp)
1638 {
1639         int i;
1640
1641         for_each_eth_queue(bp, i)
1642                 napi_enable(&bnx2x_fp(bp, i, napi));
1643 }
1644
1645 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1646 {
1647         int i;
1648
1649         for_each_rx_queue_cnic(bp, i)
1650                 napi_disable(&bnx2x_fp(bp, i, napi));
1651 }
1652
1653 static void bnx2x_napi_disable(struct bnx2x *bp)
1654 {
1655         int i;
1656
1657         for_each_eth_queue(bp, i)
1658                 napi_disable(&bnx2x_fp(bp, i, napi));
1659 }
1660
1661 void bnx2x_netif_start(struct bnx2x *bp)
1662 {
1663         if (netif_running(bp->dev)) {
1664                 bnx2x_napi_enable(bp);
1665                 if (CNIC_LOADED(bp))
1666                         bnx2x_napi_enable_cnic(bp);
1667                 bnx2x_int_enable(bp);
1668                 if (bp->state == BNX2X_STATE_OPEN)
1669                         netif_tx_wake_all_queues(bp->dev);
1670         }
1671 }
1672
1673 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1674 {
1675         bnx2x_int_disable_sync(bp, disable_hw);
1676         bnx2x_napi_disable(bp);
1677         if (CNIC_LOADED(bp))
1678                 bnx2x_napi_disable_cnic(bp);
1679 }
1680
1681 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1682 {
1683         struct bnx2x *bp = netdev_priv(dev);
1684
1685         if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1686                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1687                 u16 ether_type = ntohs(hdr->h_proto);
1688
1689                 /* Skip VLAN tag if present */
1690                 if (ether_type == ETH_P_8021Q) {
1691                         struct vlan_ethhdr *vhdr =
1692                                 (struct vlan_ethhdr *)skb->data;
1693
1694                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1695                 }
1696
1697                 /* If ethertype is FCoE or FIP - use FCoE ring */
1698                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1699                         return bnx2x_fcoe_tx(bp, txq_index);
1700         }
1701
1702         /* select a non-FCoE queue */
1703         return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1704 }
1705
1706
1707 void bnx2x_set_num_queues(struct bnx2x *bp)
1708 {
1709         /* RSS queues */
1710         bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1711
1712         /* override in STORAGE SD modes */
1713         if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1714                 bp->num_ethernet_queues = 1;
1715
1716         /* Add special queues */
1717         bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1718         bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719
1720         BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1721 }
1722
1723 /**
1724  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1725  *
1726  * @bp:         Driver handle
1727  *
1728  * We currently support at most 16 Tx queues for each CoS, thus we will
1729  * allocate a multiple of 16 for ETH L2 rings according to the value of the
1730  * bp->max_cos.
1731  *
1732  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1733  * index after all ETH L2 indices.
1734  *
1735  * If the actual number of Tx queues (for each CoS) is less than 16 then there
1736  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1737  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1738  *
1739  * The proper configuration of skb->queue_mapping is handled by
1740  * bnx2x_select_queue() and __skb_tx_hash().
1741  *
1742  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1743  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1744  */
1745 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1746 {
1747         int rc, tx, rx;
1748
1749         tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1750         rx = BNX2X_NUM_ETH_QUEUES(bp);
1751
1752         /* account for the FCoE queue */
1753         if (include_cnic && !NO_FCOE(bp)) {
1754                 rx++;
1755                 tx++;
1756         }
1757
1758         rc = netif_set_real_num_tx_queues(bp->dev, tx);
1759         if (rc) {
1760                 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1761                 return rc;
1762         }
1763         rc = netif_set_real_num_rx_queues(bp->dev, rx);
1764         if (rc) {
1765                 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1766                 return rc;
1767         }
1768
1769         DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1770                           tx, rx);
1771
1772         return rc;
1773 }
1774
1775 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1776 {
1777         int i;
1778
1779         for_each_queue(bp, i) {
1780                 struct bnx2x_fastpath *fp = &bp->fp[i];
1781                 u32 mtu;
1782
1783                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1784                 if (IS_FCOE_IDX(i))
1785                         /*
1786                          * Although there are no IP frames expected to arrive on
1787                          * this ring we still want to add an
1788                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1789                          * overrun attack.
1790                          */
1791                         mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1792                 else
1793                         mtu = bp->dev->mtu;
1794                 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1795                                   IP_HEADER_ALIGNMENT_PADDING +
1796                                   ETH_OVREHEAD +
1797                                   mtu +
1798                                   BNX2X_FW_RX_ALIGN_END;
1799                 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
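                     /* For a standard 1500 byte MTU the result plus NET_SKB_PAD
                      * fits comfortably in a 4K page, so rx_frag_size is set and
                      * the Rx buffers can be carved out of page fragments; with a
                      * jumbo MTU the sum exceeds PAGE_SIZE, rx_frag_size stays 0
                      * and the buffer allocation falls back to a plain kmalloc().
                      */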
1800                 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1801                         fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1802                 else
1803                         fp->rx_frag_size = 0;
1804         }
1805 }
1806
1807 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1808 {
1809         int i;
1810         u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1811
1812         /* Prepare the initial contents of the indirection table if RSS is
1813          * enabled
1814          */
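             /* ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g.
              * four ETH queues the table cycles through cl_id .. cl_id + 3,
              * spreading the RSS hash buckets evenly across the queues.
              */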
1815         for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1816                 bp->rss_conf_obj.ind_table[i] =
1817                         bp->fp->cl_id +
1818                         ethtool_rxfh_indir_default(i, num_eth_queues);
1819
1820         /*
1821          * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1822          * per-port, so if explicit configuration is needed, do it only
1823          * for a PMF.
1824          *
1825          * For 57712 and newer on the other hand it's a per-function
1826          * configuration.
1827          */
1828         return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1829 }
1830
1831 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1832                         bool config_hash)
1833 {
1834         struct bnx2x_config_rss_params params = {NULL};
1835         int i;
1836
1837         /* Although RSS is meaningless when there is a single HW queue we
1838          * still need it enabled in order to have HW Rx hash generated.
1839          *
1840          * if (!is_eth_multi(bp))
1841          *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
1842          */
1843
1844         params.rss_obj = rss_obj;
1845
1846         __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1847
1848         __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1849
1850         /* RSS configuration */
1851         __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1852         __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1853         __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1854         __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1855         if (rss_obj->udp_rss_v4)
1856                 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1857         if (rss_obj->udp_rss_v6)
1858                 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
1859
1860         /* Hash bits */
1861         params.rss_result_mask = MULTI_MASK;
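             /* MULTI_MASK (0x7f) keeps 7 bits of the RSS hash, i.e. the hash
              * selects one of the 128 indirection table entries.
              */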
1862
1863         memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1864
1865         if (config_hash) {
1866                 /* RSS keys */
1867                 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1868                         params.rss_key[i] = random32();
1869
1870                 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1871         }
1872
1873         return bnx2x_config_rss(bp, &params);
1874 }
1875
1876 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1877 {
1878         struct bnx2x_func_state_params func_params = {NULL};
1879
1880         /* Prepare parameters for function state transitions */
1881         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1882
1883         func_params.f_obj = &bp->func_obj;
1884         func_params.cmd = BNX2X_F_CMD_HW_INIT;
1885
1886         func_params.params.hw_init.load_phase = load_code;
1887
1888         return bnx2x_func_state_change(bp, &func_params);
1889 }
1890
1891 /*
1892  * Cleans the objects that have internal lists without sending
1893  * ramrods. Should be run when interrupts are disabled.
1894  */
1895 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1896 {
1897         int rc;
1898         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1899         struct bnx2x_mcast_ramrod_params rparam = {NULL};
1900         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1901
1902         /***************** Cleanup MACs' object first *************************/
1903
1904         /* Wait for completion of the requested commands */
1905         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1906         /* Perform a dry cleanup */
1907         __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1908
1909         /* Clean ETH primary MAC */
1910         __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1911         rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1912                                  &ramrod_flags);
1913         if (rc != 0)
1914                 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1915
1916         /* Cleanup UC list */
1917         vlan_mac_flags = 0;
1918         __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1919         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1920                                  &ramrod_flags);
1921         if (rc != 0)
1922                 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1923
1924         /***************** Now clean mcast object *****************************/
1925         rparam.mcast_obj = &bp->mcast_obj;
1926         __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1927
1928         /* Add a DEL command... */
1929         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1930         if (rc < 0)
1931                 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1932                           rc);
1933
1934         /* ...and wait until all pending commands are cleared */
1935         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1936         while (rc != 0) {
1937                 if (rc < 0) {
1938                         BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1939                                   rc);
1940                         return;
1941                 }
1942
1943                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1944         }
1945 }
1946
1947 #ifndef BNX2X_STOP_ON_ERROR
1948 #define LOAD_ERROR_EXIT(bp, label) \
1949         do { \
1950                 (bp)->state = BNX2X_STATE_ERROR; \
1951                 goto label; \
1952         } while (0)
1953
1954 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1955         do { \
1956                 bp->cnic_loaded = false; \
1957                 goto label; \
1958         } while (0)
1959 #else /*BNX2X_STOP_ON_ERROR*/
1960 #define LOAD_ERROR_EXIT(bp, label) \
1961         do { \
1962                 (bp)->state = BNX2X_STATE_ERROR; \
1963                 (bp)->panic = 1; \
1964                 return -EBUSY; \
1965         } while (0)
1966 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1967         do { \
1968                 bp->cnic_loaded = false; \
1969                 (bp)->panic = 1; \
1970                 return -EBUSY; \
1971         } while (0)
1972 #endif /*BNX2X_STOP_ON_ERROR*/
1973
1974 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1975 {
1976         /* build FW version dword */
1977         u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1978                     (BCM_5710_FW_MINOR_VERSION << 8) +
1979                     (BCM_5710_FW_REVISION_VERSION << 16) +
1980                     (BCM_5710_FW_ENGINEERING_VERSION << 24);
1981
1982         /* read loaded FW from chip */
1983         u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1984
1985         DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1986
1987         if (loaded_fw != my_fw) {
1988                 if (is_err)
1989                         BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1990                                   loaded_fw, my_fw);
1991                 return false;
1992         }
1993
1994         return true;
1995 }
1996
1997 /**
1998  * bnx2x_bz_fp - zero content of the fastpath structure.
1999  *
2000  * @bp:         driver handle
2001  * @index:      fastpath index to be zeroed
2002  *
2003  * Makes sure the contents of the bp->fp[index].napi are kept
2004  * intact.
2005  */
2006 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2007 {
2008         struct bnx2x_fastpath *fp = &bp->fp[index];
2009         struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2010
2011         int cos;
2012         struct napi_struct orig_napi = fp->napi;
2013         struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2014         /* bzero bnx2x_fastpath contents */
2015         if (bp->stats_init) {
2016                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2017                 memset(fp, 0, sizeof(*fp));
2018         } else {
2019                 /* Keep Queue statistics */
2020                 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2021                 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2022
2023                 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2024                                           GFP_KERNEL);
2025                 if (tmp_eth_q_stats)
2026                         memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2027                                sizeof(struct bnx2x_eth_q_stats));
2028
2029                 tmp_eth_q_stats_old =
2030                         kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2031                                 GFP_KERNEL);
2032                 if (tmp_eth_q_stats_old)
2033                         memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2034                                sizeof(struct bnx2x_eth_q_stats_old));
2035
2036                 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2037                 memset(fp, 0, sizeof(*fp));
2038
2039                 if (tmp_eth_q_stats) {
2040                         memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2041                                sizeof(struct bnx2x_eth_q_stats));
2042                         kfree(tmp_eth_q_stats);
2043                 }
2044
2045                 if (tmp_eth_q_stats_old) {
2046                         memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2047                                sizeof(struct bnx2x_eth_q_stats_old));
2048                         kfree(tmp_eth_q_stats_old);
2049                 }
2050
2051         }
2052
2053         /* Restore the NAPI object as it has been already initialized */
2054         fp->napi = orig_napi;
2055         fp->tpa_info = orig_tpa_info;
2056         fp->bp = bp;
2057         fp->index = index;
2058         if (IS_ETH_FP(fp))
2059                 fp->max_cos = bp->max_cos;
2060         else
2061                 /* Special queues support only one CoS */
2062                 fp->max_cos = 1;
2063
2064         /* Init txdata pointers */
2065         if (IS_FCOE_FP(fp))
2066                 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2067         if (IS_ETH_FP(fp))
2068                 for_each_cos_in_tx_queue(fp, cos)
2069                         fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2070                                 BNX2X_NUM_ETH_QUEUES(bp) + index];
2071
2072         /*
2073          * Set the TPA flag for each queue. The TPA flag determines the queue's
2074          * minimal size, so it must be set prior to queue memory allocation.
2075          */
2076         fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2077                                   (bp->flags & GRO_ENABLE_FLAG &&
2078                                    bnx2x_mtu_allows_gro(bp->dev->mtu)));
2079         if (bp->flags & TPA_ENABLE_FLAG)
2080                 fp->mode = TPA_MODE_LRO;
2081         else if (bp->flags & GRO_ENABLE_FLAG)
2082                 fp->mode = TPA_MODE_GRO;
2083
2084         /* We don't want TPA on an FCoE L2 ring */
2085         if (IS_FCOE_FP(fp))
2086                 fp->disable_tpa = 1;
2087 }
2088
2089 int bnx2x_load_cnic(struct bnx2x *bp)
2090 {
2091         int i, rc, port = BP_PORT(bp);
2092
2093         DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2094
2095         mutex_init(&bp->cnic_mutex);
2096
2097         rc = bnx2x_alloc_mem_cnic(bp);
2098         if (rc) {
2099                 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2100                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2101         }
2102
2103         rc = bnx2x_alloc_fp_mem_cnic(bp);
2104         if (rc) {
2105                 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2106                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2107         }
2108
2109         /* Update the number of queues with the cnic queues */
2110         rc = bnx2x_set_real_num_queues(bp, 1);
2111         if (rc) {
2112                 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2113                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2114         }
2115
2116         /* Add all CNIC NAPI objects */
2117         bnx2x_add_all_napi_cnic(bp);
2118         DP(NETIF_MSG_IFUP, "cnic napi added\n");
2119         bnx2x_napi_enable_cnic(bp);
2120
2121         rc = bnx2x_init_hw_func_cnic(bp);
2122         if (rc)
2123                 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2124
2125         bnx2x_nic_init_cnic(bp);
2126
2127         /* Enable Timer scan */
2128         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2129
2130         for_each_cnic_queue(bp, i) {
2131                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2132                 if (rc) {
2133                         BNX2X_ERR("Queue setup failed\n");
2134                         LOAD_ERROR_EXIT(bp, load_error_cnic2);
2135                 }
2136         }
2137
2138         /* Initialize Rx filter. */
2139         netif_addr_lock_bh(bp->dev);
2140         bnx2x_set_rx_mode(bp->dev);
2141         netif_addr_unlock_bh(bp->dev);
2142
2143         /* re-read iscsi info */
2144         bnx2x_get_iscsi_info(bp);
2145         bnx2x_setup_cnic_irq_info(bp);
2146         bnx2x_setup_cnic_info(bp);
2147         bp->cnic_loaded = true;
2148         if (bp->state == BNX2X_STATE_OPEN)
2149                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2150
2151
2152         DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2153
2154         return 0;
2155
2156 #ifndef BNX2X_STOP_ON_ERROR
2157 load_error_cnic2:
2158         /* Disable Timer scan */
2159         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2160
2161 load_error_cnic1:
2162         bnx2x_napi_disable_cnic(bp);
2163         /* Update the number of queues without the cnic queues */
2164         rc = bnx2x_set_real_num_queues(bp, 0);
2165         if (rc)
2166                 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2167 load_error_cnic0:
2168         BNX2X_ERR("CNIC-related load failed\n");
2169         bnx2x_free_fp_mem_cnic(bp);
2170         bnx2x_free_mem_cnic(bp);
2171         return rc;
2172 #endif /* ! BNX2X_STOP_ON_ERROR */
2173 }
2174
2175
2176 /* must be called with rtnl_lock */
2177 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2178 {
2179         int port = BP_PORT(bp);
2180         u32 load_code;
2181         int i, rc;
2182
2183         DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2184         DP(NETIF_MSG_IFUP,
2185            "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2186
2187 #ifdef BNX2X_STOP_ON_ERROR
2188         if (unlikely(bp->panic)) {
2189                 BNX2X_ERR("Can't load NIC when there is panic\n");
2190                 return -EPERM;
2191         }
2192 #endif
2193
2194         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2195
2196         /* Set the initial link reported state to link down */
2197         bnx2x_acquire_phy_lock(bp);
2198         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2199         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2200                 &bp->last_reported_link.link_report_flags);
2201         bnx2x_release_phy_lock(bp);
2202
2203         /* must be called before memory allocation and HW init */
2204         bnx2x_ilt_set_info(bp);
2205
2206         /*
2207          * Zero fastpath structures while preserving invariants such as napi
2208          * (allocated only once), the fp index, max_cos and the bp pointer.
2209          * Also set fp->disable_tpa and txdata_ptr.
2210          */
2211         DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2212         for_each_queue(bp, i)
2213                 bnx2x_bz_fp(bp, i);
2214         memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2215                                   bp->num_cnic_queues) *
2216                                   sizeof(struct bnx2x_fp_txdata));
2217
2218         bp->fcoe_init = false;
2219
2220         /* Set the receive queues buffer size */
2221         bnx2x_set_rx_buf_size(bp);
2222
2223         if (bnx2x_alloc_mem(bp))
2224                 return -ENOMEM;
2225
2226         /* Since bnx2x_alloc_mem() may update bp->num_queues,
2227          * bnx2x_set_real_num_queues() should always come after it.
2228          * At this stage cnic queues are not counted.
2229          */
2230         rc = bnx2x_set_real_num_queues(bp, 0);
2231         if (rc) {
2232                 BNX2X_ERR("Unable to set real_num_queues\n");
2233                 LOAD_ERROR_EXIT(bp, load_error0);
2234         }
2235
2236         /* Configure multi-CoS mappings in the kernel. This configuration
2237          * may be overridden by a multi-class queue discipline or by a DCBX
2238          * negotiation result.
2239          */
2240         bnx2x_setup_tc(bp->dev, bp->max_cos);
2241
2242         /* Add all NAPI objects */
2243         bnx2x_add_all_napi(bp);
2244         DP(NETIF_MSG_IFUP, "napi added\n");
2245         bnx2x_napi_enable(bp);
2246
2247         /* set pf load just before approaching the MCP */
2248         bnx2x_set_pf_load(bp);
2249
2250         /* Send the LOAD_REQUEST command to the MCP.
2251          * The returned LOAD command type indicates whether this is the
2252          * first port to be initialized; if so, common blocks should be
2253          * initialized as well, otherwise not.
2254          */
2255         if (!BP_NOMCP(bp)) {
2256                 /* init fw_seq */
2257                 bp->fw_seq =
2258                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2259                          DRV_MSG_SEQ_NUMBER_MASK);
2260                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2261
2262                 /* Get current FW pulse sequence */
2263                 bp->fw_drv_pulse_wr_seq =
2264                         (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2265                          DRV_PULSE_SEQ_MASK);
2266                 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2267
2268                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2269                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2270                 if (!load_code) {
2271                         BNX2X_ERR("MCP response failure, aborting\n");
2272                         rc = -EBUSY;
2273                         LOAD_ERROR_EXIT(bp, load_error1);
2274                 }
2275                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2276                         BNX2X_ERR("Driver load refused\n");
2277                         rc = -EBUSY; /* other port in diagnostic mode */
2278                         LOAD_ERROR_EXIT(bp, load_error1);
2279                 }
2280                 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2281                     load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2282                         /* abort nic load if version mismatch */
2283                         if (!bnx2x_test_firmware_version(bp, true)) {
2284                                 rc = -EBUSY;
2285                                 LOAD_ERROR_EXIT(bp, load_error2);
2286                         }
2287                 }
2288
2289         } else {
2290                 int path = BP_PATH(bp);
2291
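                     /* Without an MCP the load arbitration is emulated with the
                      * per-path load_count[] array: the first function on the
                      * path performs COMMON init, the first one on the port does
                      * PORT init and everyone else does FUNCTION init only.
                      */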
2292                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2293                    path, load_count[path][0], load_count[path][1],
2294                    load_count[path][2]);
2295                 load_count[path][0]++;
2296                 load_count[path][1 + port]++;
2297                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2298                    path, load_count[path][0], load_count[path][1],
2299                    load_count[path][2]);
2300                 if (load_count[path][0] == 1)
2301                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2302                 else if (load_count[path][1 + port] == 1)
2303                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2304                 else
2305                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2306         }
2307
2308         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2309             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2310             (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2311                 bp->port.pmf = 1;
2312                 /*
2313                  * We need the barrier to ensure the ordering between the
2314                  * writing to bp->port.pmf here and reading it from the
2315                  * bnx2x_periodic_task().
2316                  */
2317                 smp_mb();
2318         } else
2319                 bp->port.pmf = 0;
2320
2321         DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2322
2323         /* Init Function state controlling object */
2324         bnx2x__init_func_obj(bp);
2325
2326         /* Initialize HW */
2327         rc = bnx2x_init_hw(bp, load_code);
2328         if (rc) {
2329                 BNX2X_ERR("HW init failed, aborting\n");
2330                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2331                 LOAD_ERROR_EXIT(bp, load_error2);
2332         }
2333
2334         /* Connect to IRQs */
2335         rc = bnx2x_setup_irqs(bp);
2336         if (rc) {
2337                 BNX2X_ERR("IRQs setup failed\n");
2338                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2339                 LOAD_ERROR_EXIT(bp, load_error2);
2340         }
2341
2342         /* Setup NIC internals and enable interrupts */
2343         bnx2x_nic_init(bp, load_code);
2344
2345         /* Init per-function objects */
2346         bnx2x_init_bp_objs(bp);
2347
2348         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2349             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2350             (bp->common.shmem2_base)) {
2351                 if (SHMEM2_HAS(bp, dcc_support))
2352                         SHMEM2_WR(bp, dcc_support,
2353                                   (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2354                                    SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2355                 if (SHMEM2_HAS(bp, afex_driver_support))
2356                         SHMEM2_WR(bp, afex_driver_support,
2357                                   SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2358         }
2359
2360         /* Set AFEX default VLAN tag to an invalid value */
2361         bp->afex_def_vlan_tag = -1;
2362
2363         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2364         rc = bnx2x_func_start(bp);
2365         if (rc) {
2366                 BNX2X_ERR("Function start failed!\n");
2367                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2368                 LOAD_ERROR_EXIT(bp, load_error3);
2369         }
2370
2371         /* Send LOAD_DONE command to MCP */
2372         if (!BP_NOMCP(bp)) {
2373                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2374                 if (!load_code) {
2375                         BNX2X_ERR("MCP response failure, aborting\n");
2376                         rc = -EBUSY;
2377                         LOAD_ERROR_EXIT(bp, load_error3);
2378                 }
2379         }
2380
2381         rc = bnx2x_setup_leading(bp);
2382         if (rc) {
2383                 BNX2X_ERR("Setup leading failed!\n");
2384                 LOAD_ERROR_EXIT(bp, load_error3);
2385         }
2386
2387         for_each_nondefault_eth_queue(bp, i) {
2388                 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2389                 if (rc) {
2390                         BNX2X_ERR("Queue setup failed\n");
2391                         LOAD_ERROR_EXIT(bp, load_error3);
2392                 }
2393         }
2394
2395         rc = bnx2x_init_rss_pf(bp);
2396         if (rc) {
2397                 BNX2X_ERR("PF RSS init failed\n");
2398                 LOAD_ERROR_EXIT(bp, load_error3);
2399         }
2400
2401         /* Now that clients are configured we are ready to work */
2402         bp->state = BNX2X_STATE_OPEN;
2403
2404         /* Configure a ucast MAC */
2405         rc = bnx2x_set_eth_mac(bp, true);
2406         if (rc) {
2407                 BNX2X_ERR("Setting Ethernet MAC failed\n");
2408                 LOAD_ERROR_EXIT(bp, load_error3);
2409         }
2410
2411         if (bp->pending_max) {
2412                 bnx2x_update_max_mf_config(bp, bp->pending_max);
2413                 bp->pending_max = 0;
2414         }
2415
2416         if (bp->port.pmf)
2417                 bnx2x_initial_phy_init(bp, load_mode);
2418         bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2419
2420         /* Start fast path */
2421
2422         /* Initialize Rx filter. */
2423         netif_addr_lock_bh(bp->dev);
2424         bnx2x_set_rx_mode(bp->dev);
2425         netif_addr_unlock_bh(bp->dev);
2426
2427         /* Start the Tx */
2428         switch (load_mode) {
2429         case LOAD_NORMAL:
2430                 /* Tx queues should only be re-enabled */
2431                 netif_tx_wake_all_queues(bp->dev);
2432                 break;
2433
2434         case LOAD_OPEN:
2435                 netif_tx_start_all_queues(bp->dev);
2436                 smp_mb__after_clear_bit();
2437                 break;
2438
2439         case LOAD_DIAG:
2440         case LOAD_LOOPBACK_EXT:
2441                 bp->state = BNX2X_STATE_DIAG;
2442                 break;
2443
2444         default:
2445                 break;
2446         }
2447
2448         if (bp->port.pmf)
2449                 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2450         else
2451                 bnx2x__link_status_update(bp);
2452
2453         /* start the timer */
2454         mod_timer(&bp->timer, jiffies + bp->current_interval);
2455
2456         if (CNIC_ENABLED(bp))
2457                 bnx2x_load_cnic(bp);
2458
2459         /* mark that the driver is loaded in shmem2 */
2460         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2461                 u32 val;
2462                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2463                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2464                           val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2465                           DRV_FLAGS_CAPABILITIES_LOADED_L2);
2466         }
2467
2468         /* Wait for all pending SP commands to complete */
2469         if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2470                 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2471                 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2472                 return -EBUSY;
2473         }
2474
2475         /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2476         if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2477                 bnx2x_dcbx_init(bp, false);
2478
2479         DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2480
2481         return 0;
2482
2483 #ifndef BNX2X_STOP_ON_ERROR
2484 load_error3:
2485         bnx2x_int_disable_sync(bp, 1);
2486
2487         /* Clean queueable objects */
2488         bnx2x_squeeze_objects(bp);
2489
2490         /* Free SKBs, SGEs, TPA pool and driver internals */
2491         bnx2x_free_skbs(bp);
2492         for_each_rx_queue(bp, i)
2493                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2494
2495         /* Release IRQs */
2496         bnx2x_free_irq(bp);
2497 load_error2:
2498         if (!BP_NOMCP(bp)) {
2499                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2500                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2501         }
2502
2503         bp->port.pmf = 0;
2504 load_error1:
2505         bnx2x_napi_disable(bp);
2506         /* clear pf_load status, as it was already set */
2507         bnx2x_clear_pf_load(bp);
2508 load_error0:
2509         bnx2x_free_mem(bp);
2510
2511         return rc;
2512 #endif /* ! BNX2X_STOP_ON_ERROR */
2513 }
2514
2515 /* must be called with rtnl_lock */
2516 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2517 {
2518         int i;
2519         bool global = false;
2520
2521         DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2522
2523         /* mark that the driver is unloaded in shmem2 */
2524         if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2525                 u32 val;
2526                 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2527                 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2528                           val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2529         }
2530
2531         if ((bp->state == BNX2X_STATE_CLOSED) ||
2532             (bp->state == BNX2X_STATE_ERROR)) {
2533                 /* We can get here if the driver has been unloaded
2534                  * during parity error recovery and is either waiting for a
2535                  * leader to complete or for other functions to unload and
2536                  * then ifdown has been issued. In this case we want to
2537                  * unload and let other functions complete a recovery
2538                  * process.
2539                  */
2540                 bp->recovery_state = BNX2X_RECOVERY_DONE;
2541                 bp->is_leader = 0;
2542                 bnx2x_release_leader_lock(bp);
2543                 smp_mb();
2544
2545                 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2546                 BNX2X_ERR("Can't unload in closed or error state\n");
2547                 return -EINVAL;
2548         }
2549
2550         /*
2551          * It's important to set bp->state to a value different from
2552          * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2553          * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2554          */
2555         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2556         smp_mb();
2557
2558         if (CNIC_LOADED(bp))
2559                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2560
2561         /* Stop Tx */
2562         bnx2x_tx_disable(bp);
2563         netdev_reset_tc(bp->dev);
2564
2565         bp->rx_mode = BNX2X_RX_MODE_NONE;
2566
2567         del_timer_sync(&bp->timer);
2568
2569         /* Set ALWAYS_ALIVE bit in shmem */
2570         bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2571
2572         bnx2x_drv_pulse(bp);
2573
2574         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2575         bnx2x_save_statistics(bp);
2576
2577         /* Cleanup the chip if needed */
2578         if (unload_mode != UNLOAD_RECOVERY)
2579                 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2580         else {
2581                 /* Send the UNLOAD_REQUEST to the MCP */
2582                 bnx2x_send_unload_req(bp, unload_mode);
2583
2584                 /*
2585                  * Prevent transactions to host from the functions on the
2586                  * engine that doesn't reset global blocks in case of global
2587                  * attention once global blocks are reset and gates are opened
2588                  * (the engine whose leader will perform the recovery
2589                  * last).
2590                  */
2591                 if (!CHIP_IS_E1x(bp))
2592                         bnx2x_pf_disable(bp);
2593
2594                 /* Disable HW interrupts, NAPI */
2595                 bnx2x_netif_stop(bp, 1);
2596                 /* Delete all NAPI objects */
2597                 bnx2x_del_all_napi(bp);
2598                 if (CNIC_LOADED(bp))
2599                         bnx2x_del_all_napi_cnic(bp);
2600                 /* Release IRQs */
2601                 bnx2x_free_irq(bp);
2602
2603                 /* Report UNLOAD_DONE to MCP */
2604                 bnx2x_send_unload_done(bp, false);
2605         }
2606
2607         /*
2608          * At this stage no more interrupts will arrive, so we may safely clean
2609          * the queueable objects here in case they failed to get cleaned so far.
2610          */
2611         bnx2x_squeeze_objects(bp);
2612
2613         /* There should be no more pending SP commands at this stage */
2614         bp->sp_state = 0;
2615
2616         bp->port.pmf = 0;
2617
2618         /* Free SKBs, SGEs, TPA pool and driver internals */
2619         bnx2x_free_skbs(bp);
2620         if (CNIC_LOADED(bp))
2621                 bnx2x_free_skbs_cnic(bp);
2622         for_each_rx_queue(bp, i)
2623                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2624
2625         if (CNIC_LOADED(bp)) {
2626                 bnx2x_free_fp_mem_cnic(bp);
2627                 bnx2x_free_mem_cnic(bp);
2628         }
2629         bnx2x_free_mem(bp);
2630
2631         bp->state = BNX2X_STATE_CLOSED;
2632         bp->cnic_loaded = false;
2633
2634         /* Check if there are pending parity attentions. If there are - set
2635          * RECOVERY_IN_PROGRESS.
2636          */
2637         if (bnx2x_chk_parity_attn(bp, &global, false)) {
2638                 bnx2x_set_reset_in_progress(bp);
2639
2640                 /* Set RESET_IS_GLOBAL if needed */
2641                 if (global)
2642                         bnx2x_set_reset_global(bp);
2643         }
2644
2645
2646         /* The last driver must disable the "close the gate" functionality if
2647          * there is no parity attention or "process kill" pending.
2648          */
2649         if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2650                 bnx2x_disable_close_the_gate(bp);
2651
2652         DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2653
2654         return 0;
2655 }
2656
2657 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2658 {
2659         u16 pmcsr;
2660
2661         /* If there is no power capability, silently succeed */
2662         if (!bp->pm_cap) {
2663                 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2664                 return 0;
2665         }
2666
2667         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2668
2669         switch (state) {
2670         case PCI_D0:
2671                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2672                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2673                                        PCI_PM_CTRL_PME_STATUS));
2674
2675                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2676                         /* delay required during transition out of D3hot */
2677                         msleep(20);
2678                 break;
2679
2680         case PCI_D3hot:
2681                 /* If there are other clients above, don't
2682                  * shut down the power */
2683                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2684                         return 0;
2685                 /* Don't shut down the power for emulation and FPGA */
2686                 if (CHIP_REV_IS_SLOW(bp))
2687                         return 0;
2688
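                     /* PowerState field encoding 3 is D3hot */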
2689                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2690                 pmcsr |= 3;
2691
2692                 if (bp->wol)
2693                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2694
2695                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2696                                       pmcsr);
2697
2698                 /* No more memory access after this point until
2699                  * device is brought back to D0.
2700                  */
2701                 break;
2702
2703         default:
2704                 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2705                 return -EINVAL;
2706         }
2707         return 0;
2708 }
2709
2710 /*
2711  * net_device service functions
2712  */
2713 int bnx2x_poll(struct napi_struct *napi, int budget)
2714 {
2715         int work_done = 0;
2716         u8 cos;
2717         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2718                                                  napi);
2719         struct bnx2x *bp = fp->bp;
2720
2721         while (1) {
2722 #ifdef BNX2X_STOP_ON_ERROR
2723                 if (unlikely(bp->panic)) {
2724                         napi_complete(napi);
2725                         return 0;
2726                 }
2727 #endif
2728
2729                 for_each_cos_in_tx_queue(fp, cos)
2730                         if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2731                                 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2732
2733
2734                 if (bnx2x_has_rx_work(fp)) {
2735                         work_done += bnx2x_rx_int(fp, budget - work_done);
2736
2737                         /* must not complete if we consumed full budget */
2738                         if (work_done >= budget)
2739                                 break;
2740                 }
2741
2742                 /* Fall out from the NAPI loop if needed */
2743                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2744
2745                         /* No need to update SB for FCoE L2 ring as long as
2746                          * it's connected to the default SB and the SB
2747                          * has been updated when NAPI was scheduled.
2748                          */
2749                         if (IS_FCOE_FP(fp)) {
2750                                 napi_complete(napi);
2751                                 break;
2752                         }
2753                         bnx2x_update_fpsb_idx(fp);
2754                         /* bnx2x_has_rx_work() reads the status block,
2755                          * thus we need to ensure that status block indices
2756                          * have been actually read (bnx2x_update_fpsb_idx)
2757                          * prior to this check (bnx2x_has_rx_work) so that
2758                          * we won't write the "newer" value of the status block
2759                          * to IGU (if there was a DMA right after
2760                          * bnx2x_has_rx_work and if there is no rmb, the memory
2761                          * reading (bnx2x_update_fpsb_idx) may be postponed
2762                          * to right before bnx2x_ack_sb). In this case there
2763                          * will never be another interrupt until there is
2764                          * another update of the status block, while there
2765                          * is still unhandled work.
2766                          */
2767                         rmb();
2768
2769                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2770                                 napi_complete(napi);
2771                                 /* Re-enable interrupts */
2772                                 DP(NETIF_MSG_RX_STATUS,
2773                                    "Update index to %d\n", fp->fp_hc_idx);
2774                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2775                                              le16_to_cpu(fp->fp_hc_idx),
2776                                              IGU_INT_ENABLE, 1);
2777                                 break;
2778                         }
2779                 }
2780         }
2781
2782         return work_done;
2783 }
2784
2785 /* We split the first BD into header and data BDs
2786  * to ease the pain of our fellow microcode engineers;
2787  * we use one mapping for both BDs.
2788  */
2789 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2790                                    struct bnx2x_fp_txdata *txdata,
2791                                    struct sw_tx_bd *tx_buf,
2792                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
2793                                    u16 bd_prod, int nbd)
2794 {
2795         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2796         struct eth_tx_bd *d_tx_bd;
2797         dma_addr_t mapping;
2798         int old_len = le16_to_cpu(h_tx_bd->nbytes);
2799
2800         /* first fix first BD */
2801         h_tx_bd->nbd = cpu_to_le16(nbd);
2802         h_tx_bd->nbytes = cpu_to_le16(hlen);
2803
2804         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2805            h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2806
2807         /* now get a new data BD
2808          * (after the pbd) and fill it */
2809         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2810         d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2811
2812         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2813                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2814
2815         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2816         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2817         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2818
2819         /* this marks the BD as one that has no individual mapping */
2820         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2821
2822         DP(NETIF_MSG_TX_QUEUED,
2823            "TSO split data size is %d (%x:%x)\n",
2824            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2825
2826         /* update tx_bd */
2827         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2828
2829         return bd_prod;
2830 }
2831
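     /* Adjust a partial checksum so that it no longer covers the 'fix' bytes
      * preceding the transport header (fix > 0), or additionally covers the
      * -fix bytes following it (fix < 0); fold and byte-swap the result.
      */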
2832 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2833 {
2834         if (fix > 0)
2835                 csum = (u16) ~csum_fold(csum_sub(csum,
2836                                 csum_partial(t_header - fix, fix, 0)));
2837
2838         else if (fix < 0)
2839                 csum = (u16) ~csum_fold(csum_add(csum,
2840                                 csum_partial(t_header, -fix, 0)));
2841
2842         return swab16(csum);
2843 }
2844
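     /* Classify the skb into a bitmask of XMIT_* flags: plain, IPv4/IPv6
      * checksum offload, TCP checksum and GSO type.  These flags steer how
      * the transmit path builds the BDs for this packet.
      */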
2845 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2846 {
2847         u32 rc;
2848
2849         if (skb->ip_summed != CHECKSUM_PARTIAL)
2850                 rc = XMIT_PLAIN;
2851
2852         else {
2853                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2854                         rc = XMIT_CSUM_V6;
2855                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2856                                 rc |= XMIT_CSUM_TCP;
2857
2858                 } else {
2859                         rc = XMIT_CSUM_V4;
2860                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2861                                 rc |= XMIT_CSUM_TCP;
2862                 }
2863         }
2864
2865         if (skb_is_gso_v6(skb))
2866                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2867         else if (skb_is_gso(skb))
2868                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2869
2870         return rc;
2871 }
2872
2873 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2874 /* check if packet requires linearization (packet is too fragmented)
2875    no need to check fragmentation if page size > 8K (there will be no
2876    violation of FW restrictions) */
2877 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2878                              u32 xmit_type)
2879 {
2880         int to_copy = 0;
2881         int hlen = 0;
2882         int first_bd_sz = 0;
2883
2884         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2885         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2886
2887                 if (xmit_type & XMIT_GSO) {
2888                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2889                         /* Check if LSO packet needs to be copied:
2890                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2891                         int wnd_size = MAX_FETCH_BD - 3;
2892                         /* Number of windows to check */
2893                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2894                         int wnd_idx = 0;
2895                         int frag_idx = 0;
2896                         u32 wnd_sum = 0;
2897
2898                         /* Headers length */
2899                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2900                                 tcp_hdrlen(skb);
2901
2902                         /* Amount of data (w/o headers) in the linear part of the SKB */
2903                         first_bd_sz = skb_headlen(skb) - hlen;
2904
2905                         wnd_sum  = first_bd_sz;
2906
2907                         /* Calculate the first sum - it's special */
2908                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2909                                 wnd_sum +=
2910                                         skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2911
2912                         /* If there was data in the linear part of the skb - check it */
2913                         if (first_bd_sz > 0) {
2914                                 if (unlikely(wnd_sum < lso_mss)) {
2915                                         to_copy = 1;
2916                                         goto exit_lbl;
2917                                 }
2918
2919                                 wnd_sum -= first_bd_sz;
2920                         }
2921
2922                         /* Others are easier: run through the frag list and
2923                            check all windows */
2924                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2925                                 wnd_sum +=
2926                           skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2927
2928                                 if (unlikely(wnd_sum < lso_mss)) {
2929                                         to_copy = 1;
2930                                         break;
2931                                 }
2932                                 wnd_sum -=
2933                                         skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2934                         }
2935                 } else {
2936                         /* in the non-LSO case a too-fragmented packet
2937                            should always be linearized */
2938                         to_copy = 1;
2939                 }
2940         }
2941
2942 exit_lbl:
2943         if (unlikely(to_copy))
2944                 DP(NETIF_MSG_TX_QUEUED,
2945                    "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
2946                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2947                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2948
2949         return to_copy;
2950 }
2951 #endif
2952
2953 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2954                                         u32 xmit_type)
2955 {
2956         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2957                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2958                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2959         if ((xmit_type & XMIT_GSO_V6) &&
2960             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2961                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2962 }
2963
2964 /**
2965  * bnx2x_set_pbd_gso - update PBD in GSO case.
2966  *
2967  * @skb:        packet skb
2968  * @pbd:        parse BD
2969  * @xmit_type:  xmit flags
2970  */
2971 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2972                                      struct eth_tx_parse_bd_e1x *pbd,
2973                                      u32 xmit_type)
2974 {
2975         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2976         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2977         pbd->tcp_flags = pbd_tcp_flags(skb);
2978
2979         if (xmit_type & XMIT_GSO_V4) {
2980                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2981                 pbd->tcp_pseudo_csum =
2982                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2983                                                   ip_hdr(skb)->daddr,
2984                                                   0, IPPROTO_TCP, 0));
2985
2986         } else
2987                 pbd->tcp_pseudo_csum =
2988                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2989                                                 &ipv6_hdr(skb)->daddr,
2990                                                 0, IPPROTO_TCP, 0));
2991
2992         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2993 }
2994
2995 /**
2996  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2997  *
2998  * @bp:                 driver handle
2999  * @skb:                packet skb
3000  * @parsing_data:       data to be updated
3001  * @xmit_type:          xmit flags
3002  *
3003  * 57712 related
3004  */
3005 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3006         u32 *parsing_data, u32 xmit_type)
3007 {
3008         *parsing_data |=
3009                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3010                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3011                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3012
3013         if (xmit_type & XMIT_CSUM_TCP) {
3014                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3015                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3016                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3017
3018                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3019         } else
3020                 /* We support checksum offload for TCP and UDP only.
3021                  * No need to pass the UDP header length - it's a constant.
3022                  */
3023                 return skb_transport_header(skb) +
3024                                 sizeof(struct udphdr) - skb->data;
3025 }
3026
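/* Set the checksum offload flags in the start BD: L4 checksum always,
 * IP checksum for IPv4, the IPv6 flag otherwise, and the UDP flag when
 * the L4 protocol is not TCP.
 */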
3027 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3028         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3029 {
3030         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3031
3032         if (xmit_type & XMIT_CSUM_V4)
3033                 tx_start_bd->bd_flags.as_bitfield |=
3034                                         ETH_TX_BD_FLAGS_IP_CSUM;
3035         else
3036                 tx_start_bd->bd_flags.as_bitfield |=
3037                                         ETH_TX_BD_FLAGS_IPV6;
3038
3039         if (!(xmit_type & XMIT_CSUM_TCP))
3040                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3041 }
3042
3043 /**
3044  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3045  *
3046  * @bp:         driver handle
3047  * @skb:        packet skb
3048  * @pbd:        parse BD to be updated
3049  * @xmit_type:  xmit flags
3050  */
3051 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3052         struct eth_tx_parse_bd_e1x *pbd,
3053         u32 xmit_type)
3054 {
3055         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3056
3057         /* for now NS flag is not used in Linux */
3058         pbd->global_data =
3059                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3060                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3061
3062         pbd->ip_hlen_w = (skb_transport_header(skb) -
3063                         skb_network_header(skb)) >> 1;
3064
3065         hlen += pbd->ip_hlen_w;
3066
3067         /* We support checksum offload for TCP and UDP only */
3068         if (xmit_type & XMIT_CSUM_TCP)
3069                 hlen += tcp_hdrlen(skb) / 2;
3070         else
3071                 hlen += sizeof(struct udphdr) / 2;
3072
3073         pbd->total_hlen_w = cpu_to_le16(hlen);
3074         hlen = hlen*2;
3075
3076         if (xmit_type & XMIT_CSUM_TCP) {
3077                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3078
3079         } else {
3080                 s8 fix = SKB_CS_OFF(skb); /* signed! */
3081
3082                 DP(NETIF_MSG_TX_QUEUED,
3083                    "hlen %d  fix %d  csum before fix %x\n",
3084                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3085
3086                 /* HW bug: fixup the CSUM */
3087                 pbd->tcp_pseudo_csum =
3088                         bnx2x_csum_fix(skb_transport_header(skb),
3089                                        SKB_CS(skb), fix);
3090
3091                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3092                    pbd->tcp_pseudo_csum);
3093         }
3094
3095         return hlen;
3096 }
3097
3098 /* called with netif_tx_lock
3099  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3100  * netif_wake_queue()
3101  */
3102 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3103 {
3104         struct bnx2x *bp = netdev_priv(dev);
3105
3106         struct netdev_queue *txq;
3107         struct bnx2x_fp_txdata *txdata;
3108         struct sw_tx_bd *tx_buf;
3109         struct eth_tx_start_bd *tx_start_bd, *first_bd;
3110         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3111         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3112         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3113         u32 pbd_e2_parsing_data = 0;
3114         u16 pkt_prod, bd_prod;
3115         int nbd, txq_index;
3116         dma_addr_t mapping;
3117         u32 xmit_type = bnx2x_xmit_type(bp, skb);
3118         int i;
3119         u8 hlen = 0;
3120         __le16 pkt_size = 0;
3121         struct ethhdr *eth;
3122         u8 mac_type = UNICAST_ADDRESS;
3123
3124 #ifdef BNX2X_STOP_ON_ERROR
3125         if (unlikely(bp->panic))
3126                 return NETDEV_TX_BUSY;
3127 #endif
3128
3129         txq_index = skb_get_queue_mapping(skb);
3130         txq = netdev_get_tx_queue(dev, txq_index);
3131
3132         BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3133
3134         txdata = &bp->bnx2x_txq[txq_index];
3135
3136         /* enable this debug print to view the transmission queue being used
3137         DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3138            txq_index, fp_index, txdata_index); */
3139
3140         /* enable this debug print to view the transmission details
3141         DP(NETIF_MSG_TX_QUEUED,
3142            "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3143            txdata->cid, fp_index, txdata_index, txdata, fp); */
3144
3145         if (unlikely(bnx2x_tx_avail(bp, txdata) <
3146                         skb_shinfo(skb)->nr_frags +
3147                         BDS_PER_TX_PKT +
3148                         NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3149                 /* Handle special storage cases separately */
3150                 if (txdata->tx_ring_size == 0) {
3151                         struct bnx2x_eth_q_stats *q_stats =
3152                                 bnx2x_fp_qstats(bp, txdata->parent_fp);
3153                         q_stats->driver_filtered_tx_pkt++;
3154                         dev_kfree_skb(skb);
3155                         return NETDEV_TX_OK;
3156                 }
3157                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3158                 netif_tx_stop_queue(txq);
3159                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3160
3161                 return NETDEV_TX_BUSY;
3162         }
3163
3164         DP(NETIF_MSG_TX_QUEUED,
3165            "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x\n",
3166            txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3167            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3168
3169         eth = (struct ethhdr *)skb->data;
3170
3171         /* set flag according to packet type (UNICAST_ADDRESS is default) */
3172         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3173                 if (is_broadcast_ether_addr(eth->h_dest))
3174                         mac_type = BROADCAST_ADDRESS;
3175                 else
3176                         mac_type = MULTICAST_ADDRESS;
3177         }
3178
3179 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3180         /* First, check if we need to linearize the skb (due to FW
3181            restrictions). No need to check fragmentation if page size > 8K
3182            (there will be no violation of FW restrictions) */
3183         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3184                 /* Statistics of linearization */
3185                 bp->lin_cnt++;
3186                 if (skb_linearize(skb) != 0) {
3187                         DP(NETIF_MSG_TX_QUEUED,
3188                            "SKB linearization failed - silently dropping this SKB\n");
3189                         dev_kfree_skb_any(skb);
3190                         return NETDEV_TX_OK;
3191                 }
3192         }
3193 #endif
3194         /* Map skb linear data for DMA */
3195         mapping = dma_map_single(&bp->pdev->dev, skb->data,
3196                                  skb_headlen(skb), DMA_TO_DEVICE);
3197         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3198                 DP(NETIF_MSG_TX_QUEUED,
3199                    "SKB mapping failed - silently dropping this SKB\n");
3200                 dev_kfree_skb_any(skb);
3201                 return NETDEV_TX_OK;
3202         }
3203         /*
3204          * Please read carefully. First we use one BD which we mark as start,
3205          * then we have a parsing info BD (used for TSO or xsum),
3206          * and only then we have the rest of the TSO BDs.
3207          * (don't forget to mark the last one as last,
3208          * and to unmap only AFTER you write to the BD ...)
3209          * And above all, all PBD sizes are in words - NOT DWORDS!
3210          */
3211
3212         /* get current pkt produced now - advance it just before sending packet
3213          * since mapping of pages may fail and cause packet to be dropped
3214          */
3215         pkt_prod = txdata->tx_pkt_prod;
3216         bd_prod = TX_BD(txdata->tx_bd_prod);
3217
3218         /* get a tx_buf and first BD
3219          * tx_start_bd may be changed during SPLIT,
3220          * but first_bd will always stay first
3221          */
3222         tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3223         tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3224         first_bd = tx_start_bd;
3225
3226         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3227         SET_FLAG(tx_start_bd->general_data,
3228                  ETH_TX_START_BD_PARSE_NBDS,
3229                  0);
3230
3231         /* header nbd */
3232         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3233
3234         /* remember the first BD of the packet */
3235         tx_buf->first_bd = txdata->tx_bd_prod;
3236         tx_buf->skb = skb;
3237         tx_buf->flags = 0;
3238
3239         DP(NETIF_MSG_TX_QUEUED,
3240            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3241            pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3242
3243         if (vlan_tx_tag_present(skb)) {
3244                 tx_start_bd->vlan_or_ethertype =
3245                     cpu_to_le16(vlan_tx_tag_get(skb));
3246                 tx_start_bd->bd_flags.as_bitfield |=
3247                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3248         } else
3249                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3250
3251         /* turn on parsing and get a BD */
3252         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3253
3254         if (xmit_type & XMIT_CSUM)
3255                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3256
3257         if (!CHIP_IS_E1x(bp)) {
3258                 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3259                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3260                 /* Set PBD in checksum offload case */
3261                 if (xmit_type & XMIT_CSUM)
3262                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3263                                                      &pbd_e2_parsing_data,
3264                                                      xmit_type);
3265                 if (IS_MF_SI(bp)) {
3266                         /*
3267                          * fill in the MAC addresses in the PBD - for local
3268                          * switching
3269                          */
3270                         bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3271                                               &pbd_e2->src_mac_addr_mid,
3272                                               &pbd_e2->src_mac_addr_lo,
3273                                               eth->h_source);
3274                         bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3275                                               &pbd_e2->dst_mac_addr_mid,
3276                                               &pbd_e2->dst_mac_addr_lo,
3277                                               eth->h_dest);
3278                 }
3279
3280                 SET_FLAG(pbd_e2_parsing_data,
3281                          ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3282         } else {
3283                 u16 global_data = 0;
3284                 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3285                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3286                 /* Set PBD in checksum offload case */
3287                 if (xmit_type & XMIT_CSUM)
3288                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3289
3290                 SET_FLAG(global_data,
3291                          ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3292                 pbd_e1x->global_data |= cpu_to_le16(global_data);
3293         }
3294
3295         /* Setup the data pointer of the first BD of the packet */
3296         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3297         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3298         nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3299         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3300         pkt_size = tx_start_bd->nbytes;
3301
3302         DP(NETIF_MSG_TX_QUEUED,
3303            "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
3304            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3305            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3306            tx_start_bd->bd_flags.as_bitfield,
3307            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3308
3309         if (xmit_type & XMIT_GSO) {
3310
3311                 DP(NETIF_MSG_TX_QUEUED,
3312                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
3313                    skb->len, hlen, skb_headlen(skb),
3314                    skb_shinfo(skb)->gso_size);
3315
3316                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3317
3318                 if (unlikely(skb_headlen(skb) > hlen))
3319                         bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3320                                                  &tx_start_bd, hlen,
3321                                                  bd_prod, ++nbd);
3322                 if (!CHIP_IS_E1x(bp))
3323                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3324                                              xmit_type);
3325                 else
3326                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3327         }
3328
3329         /* Set the PBD's parsing_data field if not zero
3330          * (for the chips newer than 57711).
3331          */
3332         if (pbd_e2_parsing_data)
3333                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3334
3335         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3336
3337         /* Handle fragmented skb */
3338         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3339                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3340
3341                 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3342                                            skb_frag_size(frag), DMA_TO_DEVICE);
3343                 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3344                         unsigned int pkts_compl = 0, bytes_compl = 0;
3345
3346                         DP(NETIF_MSG_TX_QUEUED,
3347                            "Unable to map page - dropping packet...\n");
3348
3349                         /* we need to unmap all buffers already mapped
3350                          * for this SKB;
3351                          * first_bd->nbd needs to be properly updated
3352                          * before the call to bnx2x_free_tx_pkt
3353                          */
3354                         first_bd->nbd = cpu_to_le16(nbd);
3355                         bnx2x_free_tx_pkt(bp, txdata,
3356                                           TX_BD(txdata->tx_pkt_prod),
3357                                           &pkts_compl, &bytes_compl);
3358                         return NETDEV_TX_OK;
3359                 }
3360
3361                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3362                 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3363                 if (total_pkt_bd == NULL)
3364                         total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3365
3366                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3367                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3368                 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3369                 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3370                 nbd++;
3371
3372                 DP(NETIF_MSG_TX_QUEUED,
3373                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
3374                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3375                    le16_to_cpu(tx_data_bd->nbytes));
3376         }
3377
3378         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3379
3380         /* update with actual num BDs */
3381         first_bd->nbd = cpu_to_le16(nbd);
3382
3383         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3384
3385         /* now send a tx doorbell, counting the next BD
3386          * if the packet contains or ends with it
3387          */
3388         if (TX_BD_POFF(bd_prod) < nbd)
3389                 nbd++;
3390
3391         /* total_pkt_bytes should be set on the first data BD if
3392          * it's not an LSO packet and there is more than one
3393          * data BD. In this case pkt_size is limited by the MTU value.
3394          * However we prefer to set it for an LSO packet (while we don't
3395          * have to) in order to save some CPU cycles in the non-LSO
3396          * case, when we care much more about them.
3397          */
3398         if (total_pkt_bd != NULL)
3399                 total_pkt_bd->total_pkt_bytes = pkt_size;
3400
3401         if (pbd_e1x)
3402                 DP(NETIF_MSG_TX_QUEUED,
3403                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
3404                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3405                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3406                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3407                     le16_to_cpu(pbd_e1x->total_hlen_w));
3408         if (pbd_e2)
3409                 DP(NETIF_MSG_TX_QUEUED,
3410                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
3411                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3412                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3413                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3414                    pbd_e2->parsing_data);
3415         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
3416
3417         netdev_tx_sent_queue(txq, skb->len);
3418
3419         skb_tx_timestamp(skb);
3420
3421         txdata->tx_pkt_prod++;
3422         /*
3423          * Make sure that the BD data is updated before updating the producer
3424          * since FW might read the BD right after the producer is updated.
3425          * This is only applicable for weak-ordered memory model archs such
3426          * as IA-64. The following barrier is also mandatory since FW
3427          * assumes packets always have BDs.
3428          */
3429         wmb();
3430
3431         txdata->tx_db.data.prod += nbd;
3432         barrier();
3433
3434         DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3435
3436         mmiowb();
3437
3438         txdata->tx_bd_prod += nbd;
3439
3440         if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3441                 netif_tx_stop_queue(txq);
3442
3443                 /* the paired memory barrier is in bnx2x_tx_int(); we have to
3444                  * keep the ordering of set_bit() in netif_tx_stop_queue() and
3445                  * the read of fp->bd_tx_cons */
3446                 smp_mb();
3447
3448                 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3449                 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3450                         netif_tx_wake_queue(txq);
3451         }
3452         txdata->tx_pkt++;
3453
3454         return NETDEV_TX_OK;
3455 }
3456
3457 /**
3458  * bnx2x_setup_tc - routine to configure net_device for multi tc
3459  *
3460  * @netdev: net device to configure
3461  * @tc: number of traffic classes to enable
3462  *
3463  * callback connected to the ndo_setup_tc function pointer
3464  */
3465 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3466 {
3467         int cos, prio, count, offset;
3468         struct bnx2x *bp = netdev_priv(dev);
3469
3470         /* setup tc must be called under rtnl lock */
3471         ASSERT_RTNL();
3472
3473         /* no traffic classes requested. aborting */
3474         if (!num_tc) {
3475                 netdev_reset_tc(dev);
3476                 return 0;
3477         }
3478
3479         /* requested to support too many traffic classes */
3480         if (num_tc > bp->max_cos) {
3481                 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3482                           num_tc, bp->max_cos);
3483                 return -EINVAL;
3484         }
3485
3486         /* declare the number of supported traffic classes */
3487         if (netdev_set_num_tc(dev, num_tc)) {
3488                 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3489                 return -EINVAL;
3490         }
3491
3492         /* configure priority to traffic class mapping */
3493         for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3494                 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3495                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3496                    "mapping priority %d to tc %d\n",
3497                    prio, bp->prio_to_cos[prio]);
3498         }
3499
3500
3501         /* Use this configuration to differentiate tc0 from other COSes.
3502            This can be used for ETS or PFC, and saves the effort of setting
3503            up a multi-class queueing discipline or negotiating DCBX with a switch
3504         netdev_set_prio_tc_map(dev, 0, 0);
3505         DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3506         for (prio = 1; prio < 16; prio++) {
3507                 netdev_set_prio_tc_map(dev, prio, 1);
3508                 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3509         } */
3510
3511         /* configure traffic class to transmission queue mapping */
3512         for (cos = 0; cos < bp->max_cos; cos++) {
3513                 count = BNX2X_NUM_ETH_QUEUES(bp);
3514                 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3515                 netdev_set_tc_queue(dev, cos, count, offset);
3516                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3517                    "mapping tc %d to offset %d count %d\n",
3518                    cos, offset, count);
3519         }
3520
3521         return 0;
3522 }
3523
3524 /* called with rtnl_lock */
3525 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3526 {
3527         struct sockaddr *addr = p;
3528         struct bnx2x *bp = netdev_priv(dev);
3529         int rc = 0;
3530
3531         if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3532                 BNX2X_ERR("Requested MAC address is not valid\n");
3533                 return -EINVAL;
3534         }
3535
3536         if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3537             !is_zero_ether_addr(addr->sa_data)) {
3538                 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3539                 return -EINVAL;
3540         }
3541
3542         if (netif_running(dev))  {
3543                 rc = bnx2x_set_eth_mac(bp, false);
3544                 if (rc)
3545                         return rc;
3546         }
3547
3548         dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3549         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3550
3551         if (netif_running(dev))
3552                 rc = bnx2x_set_eth_mac(bp, true);
3553
3554         return rc;
3555 }
3556
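/* Free all memory attached to a single fastpath: its status block, the
 * Rx buffer/descriptor/completion/SGE rings and the per-COS Tx rings.
 */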
3557 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3558 {
3559         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3560         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3561         u8 cos;
3562
3563         /* Common */
3564
3565         if (IS_FCOE_IDX(fp_index)) {
3566                 memset(sb, 0, sizeof(union host_hc_status_block));
3567                 fp->status_blk_mapping = 0;
3568         } else {
3569                 /* status blocks */
3570                 if (!CHIP_IS_E1x(bp))
3571                         BNX2X_PCI_FREE(sb->e2_sb,
3572                                        bnx2x_fp(bp, fp_index,
3573                                                 status_blk_mapping),
3574                                        sizeof(struct host_hc_status_block_e2));
3575                 else
3576                         BNX2X_PCI_FREE(sb->e1x_sb,
3577                                        bnx2x_fp(bp, fp_index,
3578                                                 status_blk_mapping),
3579                                        sizeof(struct host_hc_status_block_e1x));
3580         }
3581
3582         /* Rx */
3583         if (!skip_rx_queue(bp, fp_index)) {
3584                 bnx2x_free_rx_bds(fp);
3585
3586                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3587                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3588                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3589                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
3590                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
3591
3592                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3593                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
3594                                sizeof(struct eth_fast_path_rx_cqe) *
3595                                NUM_RCQ_BD);
3596
3597                 /* SGE ring */
3598                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3599                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3600                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
3601                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3602         }
3603
3604         /* Tx */
3605         if (!skip_tx_queue(bp, fp_index)) {
3606                 /* fastpath tx rings: tx_buf tx_desc */
3607                 for_each_cos_in_tx_queue(fp, cos) {
3608                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3609
3610                         DP(NETIF_MSG_IFDOWN,
3611                            "freeing tx memory of fp %d cos %d cid %d\n",
3612                            fp_index, cos, txdata->cid);
3613
3614                         BNX2X_FREE(txdata->tx_buf_ring);
3615                         BNX2X_PCI_FREE(txdata->tx_desc_ring,
3616                                 txdata->tx_desc_mapping,
3617                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3618                 }
3619         }
3620         /* end of fastpath */
3621 }
3622
3623 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3624 {
3625         int i;
3626         for_each_cnic_queue(bp, i)
3627                 bnx2x_free_fp_mem_at(bp, i);
3628 }
3629
3630 void bnx2x_free_fp_mem(struct bnx2x *bp)
3631 {
3632         int i;
3633         for_each_eth_queue(bp, i)
3634                 bnx2x_free_fp_mem_at(bp, i);
3635 }
3636
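/* Set the per-fastpath shortcuts into the chip-specific (E1x vs. E2)
 * status block layout: the index_values array and the running_index.
 */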
3637 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3638 {
3639         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3640         if (!CHIP_IS_E1x(bp)) {
3641                 bnx2x_fp(bp, index, sb_index_values) =
3642                         (__le16 *)status_blk.e2_sb->sb.index_values;
3643                 bnx2x_fp(bp, index, sb_running_index) =
3644                         (__le16 *)status_blk.e2_sb->sb.running_index;
3645         } else {
3646                 bnx2x_fp(bp, index, sb_index_values) =
3647                         (__le16 *)status_blk.e1x_sb->sb.index_values;
3648                 bnx2x_fp(bp, index, sb_running_index) =
3649                         (__le16 *)status_blk.e1x_sb->sb.running_index;
3650         }
3651 }
3652
3653 /* Returns the number of actually allocated BDs */
3654 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3655                               int rx_ring_size)
3656 {
3657         struct bnx2x *bp = fp->bp;
3658         u16 ring_prod, cqe_ring_prod;
3659         int i, failure_cnt = 0;
3660
3661         fp->rx_comp_cons = 0;
3662         cqe_ring_prod = ring_prod = 0;
3663
3664         /* This routine is called only during fp init so
3665          * fp->eth_q_stats.rx_skb_alloc_failed = 0
3666          */
3667         for (i = 0; i < rx_ring_size; i++) {
3668                 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3669                         failure_cnt++;
3670                         continue;
3671                 }
3672                 ring_prod = NEXT_RX_IDX(ring_prod);
3673                 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3674                 WARN_ON(ring_prod <= (i - failure_cnt));
3675         }
3676
3677         if (failure_cnt)
3678                 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3679                           i - failure_cnt, fp->index);
3680
3681         fp->rx_bd_prod = ring_prod;
3682         /* Limit the CQE producer by the CQE ring size */
3683         fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3684                                cqe_ring_prod);
3685         fp->rx_pkt = fp->rx_calls = 0;
3686
3687         bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3688
3689         return i - failure_cnt;
3690 }
3691
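/* Chain the RCQ pages: the last CQE of every page is turned into a
 * "next page" element pointing at the following page of the ring.
 */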
3692 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3693 {
3694         int i;
3695
3696         for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3697                 struct eth_rx_cqe_next_page *nextpg;
3698
3699                 nextpg = (struct eth_rx_cqe_next_page *)
3700                         &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3701                 nextpg->addr_hi =
3702                         cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3703                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3704                 nextpg->addr_lo =
3705                         cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3706                                    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3707         }
3708 }
3709
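/* Allocate all memory needed by fastpath 'index': status block, per-COS
 * Tx rings and the Rx descriptor/completion/SGE rings. The Rx ring size
 * is taken from bp->rx_ring_size or derived from chip type and FW
 * minimum requirements when it has not been configured explicitly.
 */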
3710 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3711 {
3712         union host_hc_status_block *sb;
3713         struct bnx2x_fastpath *fp = &bp->fp[index];
3714         int ring_size = 0;
3715         u8 cos;
3716         int rx_ring_size = 0;
3717
3718         if (!bp->rx_ring_size &&
3719             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3720                 rx_ring_size = MIN_RX_SIZE_NONTPA;
3721                 bp->rx_ring_size = rx_ring_size;
3722         } else if (!bp->rx_ring_size) {
3723                 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3724
3725                 if (CHIP_IS_E3(bp)) {
3726                         u32 cfg = SHMEM_RD(bp,
3727                                            dev_info.port_hw_config[BP_PORT(bp)].
3728                                            default_cfg);
3729
3730                         /* Decrease ring size for 1G functions */
3731                         if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3732                             PORT_HW_CFG_NET_SERDES_IF_SGMII)
3733                                 rx_ring_size /= 10;
3734                 }
3735
3736                 /* allocate at least the number of buffers required by FW */
3737                 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3738                                      MIN_RX_SIZE_TPA, rx_ring_size);
3739
3740                 bp->rx_ring_size = rx_ring_size;
3741         } else /* if rx_ring_size specified - use it */
3742                 rx_ring_size = bp->rx_ring_size;
3743
3744         /* Common */
3745         sb = &bnx2x_fp(bp, index, status_blk);
3746
3747         if (!IS_FCOE_IDX(index)) {
3748                 /* status blocks */
3749                 if (!CHIP_IS_E1x(bp))
3750                         BNX2X_PCI_ALLOC(sb->e2_sb,
3751                                 &bnx2x_fp(bp, index, status_blk_mapping),
3752                                 sizeof(struct host_hc_status_block_e2));
3753                 else
3754                         BNX2X_PCI_ALLOC(sb->e1x_sb,
3755                                 &bnx2x_fp(bp, index, status_blk_mapping),
3756                             sizeof(struct host_hc_status_block_e1x));
3757         }
3758
3759         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3760          * set shortcuts for it.
3761          */
3762         if (!IS_FCOE_IDX(index))
3763                 set_sb_shortcuts(bp, index);
3764
3765         /* Tx */
3766         if (!skip_tx_queue(bp, index)) {
3767                 /* fastpath tx rings: tx_buf tx_desc */
3768                 for_each_cos_in_tx_queue(fp, cos) {
3769                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3770
3771                         DP(NETIF_MSG_IFUP,
3772                            "allocating tx memory of fp %d cos %d\n",
3773                            index, cos);
3774
3775                         BNX2X_ALLOC(txdata->tx_buf_ring,
3776                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3777                         BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3778                                 &txdata->tx_desc_mapping,
3779                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3780                 }
3781         }
3782
3783         /* Rx */
3784         if (!skip_rx_queue(bp, index)) {
3785                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3786                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3787                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3788                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3789                                 &bnx2x_fp(bp, index, rx_desc_mapping),
3790                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3791
3792                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3793                                 &bnx2x_fp(bp, index, rx_comp_mapping),
3794                                 sizeof(struct eth_fast_path_rx_cqe) *
3795                                 NUM_RCQ_BD);
3796
3797                 /* SGE ring */
3798                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3799                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3800                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3801                                 &bnx2x_fp(bp, index, rx_sge_mapping),
3802                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3803                 /* RX BD ring */
3804                 bnx2x_set_next_page_rx_bd(fp);
3805
3806                 /* CQ ring */
3807                 bnx2x_set_next_page_rx_cq(fp);
3808
3809                 /* BDs */
3810                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3811                 if (ring_size < rx_ring_size)
3812                         goto alloc_mem_err;
3813         }
3814
3815         return 0;
3816
3817 /* handles low memory cases */
3818 alloc_mem_err:
3819         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3820                                                 index, ring_size);
3821         /* FW will drop all packets if the queue is not big enough,
3822          * so in these cases we disable the queue.
3823          * Min size is different for OOO, TPA and non-TPA queues
3824          */
3825         if (ring_size < (fp->disable_tpa ?
3826                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3827                 /* release memory allocated for this queue */
3828                 bnx2x_free_fp_mem_at(bp, index);
3829                 return -ENOMEM;
3830         }
3831         return 0;
3832 }
3833
3834 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3835 {
3836         if (!NO_FCOE(bp))
3837                 /* FCoE */
3838                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3839                         /* we will fail the load process instead of marking
3840                          * NO_FCOE_FLAG
3841                          */
3842                         return -ENOMEM;
3843
3844         return 0;
3845 }
3846
3847 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3848 {
3849         int i;
3850
3851         /* 1. Allocate FP for leading - fatal if error
3852          * 2. Allocate RSS - fix number of queues if error
3853          */
3854
3855         /* leading */
3856         if (bnx2x_alloc_fp_mem_at(bp, 0))
3857                 return -ENOMEM;
3858
3859         /* RSS */
3860         for_each_nondefault_eth_queue(bp, i)
3861                 if (bnx2x_alloc_fp_mem_at(bp, i))
3862                         break;
3863
3864         /* handle memory failures */
3865         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3866                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3867
3868                 WARN_ON(delta < 0);
3869                 if (CNIC_SUPPORT(bp))
3870                         /* move non-eth FPs next to the last eth FP;
3871                          * must be done in this order:
3872                          * FCOE_IDX < FWD_IDX < OOO_IDX
3873                          */
3874
3875                         /* move the FCoE fp even if NO_FCOE_FLAG is on */
3876                         bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3877                 bp->num_ethernet_queues -= delta;
3878                 bp->num_queues = bp->num_ethernet_queues +
3879                                  bp->num_cnic_queues;
3880                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3881                           bp->num_queues + delta, bp->num_queues);
3882         }
3883
3884         return 0;
3885 }
3886
3887 void bnx2x_free_mem_bp(struct bnx2x *bp)
3888 {
3889         kfree(bp->fp->tpa_info);
3890         kfree(bp->fp);
3891         kfree(bp->sp_objs);
3892         kfree(bp->fp_stats);
3893         kfree(bp->bnx2x_txq);
3894         kfree(bp->msix_table);
3895         kfree(bp->ilt);
3896 }
3897
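/* Allocate the per-device driver structures: the fp array (including TPA
 * aggregation info), sp_objs, fp_stats, the Tx queue array, the MSI-X
 * table and the ILT. On failure everything is released via
 * bnx2x_free_mem_bp().
 */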
3898 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3899 {
3900         struct bnx2x_fastpath *fp;
3901         struct msix_entry *tbl;
3902         struct bnx2x_ilt *ilt;
3903         int msix_table_size = 0;
3904         int fp_array_size, txq_array_size;
3905         int i;
3906
3907         /*
3908          * The biggest MSI-X table we might need is the maximum number of fast
3909          * path IGU SBs plus the default SB (for the PF).
3910          */
3911         msix_table_size = bp->igu_sb_cnt + 1;
3912
3913         /* fp array: RSS plus CNIC related L2 queues */
3914         fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3915         BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3916
3917         fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
3918         if (!fp)
3919                 goto alloc_err;
3920         for (i = 0; i < fp_array_size; i++) {
3921                 fp[i].tpa_info =
3922                         kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3923                                 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3924                 if (!(fp[i].tpa_info))
3925                         goto alloc_err;
3926         }
3927
3928         bp->fp = fp;
3929
3930         /* allocate sp objs */
3931         bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3932                               GFP_KERNEL);
3933         if (!bp->sp_objs)
3934                 goto alloc_err;
3935
3936         /* allocate fp_stats */
3937         bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3938                                GFP_KERNEL);
3939         if (!bp->fp_stats)
3940                 goto alloc_err;
3941
3942         /* Allocate memory for the transmission queues array */
3943         txq_array_size =
3944                 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3945         BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3946
3947         bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3948                                 GFP_KERNEL);
3949         if (!bp->bnx2x_txq)
3950                 goto alloc_err;
3951
3952         /* msix table */
3953         tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3954         if (!tbl)
3955                 goto alloc_err;
3956         bp->msix_table = tbl;
3957
3958         /* ilt */
3959         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3960         if (!ilt)
3961                 goto alloc_err;
3962         bp->ilt = ilt;
3963
3964         return 0;
3965 alloc_err:
3966         bnx2x_free_mem_bp(bp);
3967         return -ENOMEM;
3968
3969 }
3970
3971 int bnx2x_reload_if_running(struct net_device *dev)
3972 {
3973         struct bnx2x *bp = netdev_priv(dev);
3974
3975         if (unlikely(!netif_running(dev)))
3976                 return 0;
3977
3978         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3979         return bnx2x_nic_load(bp, LOAD_NORMAL);
3980 }
3981
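/* Return the index of the PHY currently in use: the internal PHY on
 * single-PHY boards, otherwise the external PHY chosen by the current
 * link state or by the configured PHY selection policy.
 */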
3982 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3983 {
3984         u32 sel_phy_idx = 0;
3985         if (bp->link_params.num_phys <= 1)
3986                 return INT_PHY;
3987
3988         if (bp->link_vars.link_up) {
3989                 sel_phy_idx = EXT_PHY1;
3990                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3991                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3992                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3993                         sel_phy_idx = EXT_PHY2;
3994         } else {
3995
3996                 switch (bnx2x_phy_selection(&bp->link_params)) {
3997                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3998                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3999                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4000                        sel_phy_idx = EXT_PHY1;
4001                        break;
4002                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4003                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4004                        sel_phy_idx = EXT_PHY2;
4005                        break;
4006                 }
4007         }
4008
4009         return sel_phy_idx;
4010
4011 }
4012 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4013 {
4014         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4015         /*
4016          * The selected active PHY index always refers to the post-swap
4017          * configuration (in case PHY swapping is enabled), so when swapping
4018          * is enabled we need to reverse the configuration.
4019          */
4020
4021         if (bp->link_params.multi_phy_config &
4022             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4023                 if (sel_phy_idx == EXT_PHY1)
4024                         sel_phy_idx = EXT_PHY2;
4025                 else if (sel_phy_idx == EXT_PHY2)
4026                         sel_phy_idx = EXT_PHY1;
4027         }
4028         return LINK_CONFIG_IDX(sel_phy_idx);
4029 }
4030
4031 #ifdef NETDEV_FCOE_WWNN
4032 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4033 {
4034         struct bnx2x *bp = netdev_priv(dev);
4035         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4036
4037         switch (type) {
4038         case NETDEV_FCOE_WWNN:
4039                 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4040                                 cp->fcoe_wwn_node_name_lo);
4041                 break;
4042         case NETDEV_FCOE_WWPN:
4043                 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4044                                 cp->fcoe_wwn_port_name_lo);
4045                 break;
4046         default:
4047                 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4048                 return -EINVAL;
4049         }
4050
4051         return 0;
4052 }
4053 #endif
4054
4055 /* called with rtnl_lock */
4056 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4057 {
4058         struct bnx2x *bp = netdev_priv(dev);
4059
4060         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4061                 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4062                 return -EAGAIN;
4063         }
4064
4065         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4066             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4067                 BNX2X_ERR("Can't support requested MTU size\n");
4068                 return -EINVAL;
4069         }
4070
4071         /* This does not race with packet allocation
4072          * because the actual alloc size is
4073          * only updated as part of load
4074          */
4075         dev->mtu = new_mtu;
4076
4077         return bnx2x_reload_if_running(dev);
4078 }
4079
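/* ndo_fix_features callback: LRO/GRO depend on Rx checksum offload, so
 * drop them when Rx checksumming is disabled or TPA is globally off.
 */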
4080 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4081                                      netdev_features_t features)
4082 {
4083         struct bnx2x *bp = netdev_priv(dev);
4084
4085         /* TPA requires Rx CSUM offloading */
4086         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4087                 features &= ~NETIF_F_LRO;
4088                 features &= ~NETIF_F_GRO;
4089         }
4090
4091         return features;
4092 }
4093
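/* ndo_set_features callback: translate LRO/GRO into the TPA/GRO flags
 * and NETIF_F_LOOPBACK into BMAC loopback mode, reloading the NIC when a
 * setting actually changed and recovery is not in progress.
 */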
4094 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4095 {
4096         struct bnx2x *bp = netdev_priv(dev);
4097         u32 flags = bp->flags;
4098         bool bnx2x_reload = false;
4099
4100         if (features & NETIF_F_LRO)
4101                 flags |= TPA_ENABLE_FLAG;
4102         else
4103                 flags &= ~TPA_ENABLE_FLAG;
4104
4105         if (features & NETIF_F_GRO)
4106                 flags |= GRO_ENABLE_FLAG;
4107         else
4108                 flags &= ~GRO_ENABLE_FLAG;
4109
4110         if (features & NETIF_F_LOOPBACK) {
4111                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4112                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
4113                         bnx2x_reload = true;
4114                 }
4115         } else {
4116                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4117                         bp->link_params.loopback_mode = LOOPBACK_NONE;
4118                         bnx2x_reload = true;
4119                 }
4120         }
4121
4122         if (flags ^ bp->flags) {
4123                 bp->flags = flags;
4124                 bnx2x_reload = true;
4125         }
4126
4127         if (bnx2x_reload) {
4128                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4129                         return bnx2x_reload_if_running(dev);
4130                 /* else: bnx2x_nic_load() will be called at end of recovery */
4131         }
4132
4133         return 0;
4134 }
4135
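/* ndo_tx_timeout callback: flag the Tx timeout and let the sp_rtnl task
 * perform the actual reset.
 */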
4136 void bnx2x_tx_timeout(struct net_device *dev)
4137 {
4138         struct bnx2x *bp = netdev_priv(dev);
4139
4140 #ifdef BNX2X_STOP_ON_ERROR
4141         if (!bp->panic)
4142                 bnx2x_panic();
4143 #endif
4144
4145         smp_mb__before_clear_bit();
4146         set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4147         smp_mb__after_clear_bit();
4148
4149         /* This allows the netif to be shut down gracefully before resetting */
4150         schedule_delayed_work(&bp->sp_rtnl_task, 0);
4151 }
4152
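/* PCI suspend handler: detach the netdev, unload the NIC and move the
 * device to the requested PCI power state.
 */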
4153 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4154 {
4155         struct net_device *dev = pci_get_drvdata(pdev);
4156         struct bnx2x *bp;
4157
4158         if (!dev) {
4159                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4160                 return -ENODEV;
4161         }
4162         bp = netdev_priv(dev);
4163
4164         rtnl_lock();
4165
4166         pci_save_state(pdev);
4167
4168         if (!netif_running(dev)) {
4169                 rtnl_unlock();
4170                 return 0;
4171         }
4172
4173         netif_device_detach(dev);
4174
4175         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4176
4177         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4178
4179         rtnl_unlock();
4180
4181         return 0;
4182 }
4183
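/* PCI resume handler: restore PCI state, power the device up, reattach
 * the netdev and reload the NIC.
 */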
4184 int bnx2x_resume(struct pci_dev *pdev)
4185 {
4186         struct net_device *dev = pci_get_drvdata(pdev);
4187         struct bnx2x *bp;
4188         int rc;
4189
4190         if (!dev) {
4191                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4192                 return -ENODEV;
4193         }
4194         bp = netdev_priv(dev);
4195
4196         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4197                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4198                 return -EAGAIN;
4199         }
4200
4201         rtnl_lock();
4202
4203         pci_restore_state(pdev);
4204
4205         if (!netif_running(dev)) {
4206                 rtnl_unlock();
4207                 return 0;
4208         }
4209
4210         bnx2x_set_power_state(bp, PCI_D0);
4211         netif_device_attach(dev);
4212
4213         rc = bnx2x_nic_load(bp, LOAD_OPEN);
4214
4215         rtnl_unlock();
4216
4217         return rc;
4218 }
4219
4220
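/* Write the CDU validation values into the ustorm and xstorm sections of
 * an Ethernet connection context.
 */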
4221 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4222                               u32 cid)
4223 {
4224         /* ustorm cxt validation */
4225         cxt->ustorm_ag_context.cdu_usage =
4226                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4227                         CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4228         /* xcontext validation */
4229         cxt->xstorm_ag_context.cdu_reserved =
4230                 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4231                         CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4232 }
4233
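/* Program the host-coalescing timeout (in ticks) for one status block
 * index in CSTORM internal memory.
 */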
4234 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4235                                     u8 fw_sb_id, u8 sb_index,
4236                                     u8 ticks)
4237 {
4238
4239         u32 addr = BAR_CSTRORM_INTMEM +
4240                    CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4241         REG_WR8(bp, addr, ticks);
4242         DP(NETIF_MSG_IFUP,
4243            "port %x fw_sb_id %d sb_index %d ticks %d\n",
4244            port, fw_sb_id, sb_index, ticks);
4245 }
4246
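/* Enable or disable host coalescing for one status block index by
 * updating the HC_ENABLED flag in CSTORM internal memory.
 */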
4247 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4248                                     u16 fw_sb_id, u8 sb_index,
4249                                     u8 disable)
4250 {
4251         u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4252         u32 addr = BAR_CSTRORM_INTMEM +
4253                    CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4254         u16 flags = REG_RD16(bp, addr);
4255         /* clear and set */
4256         flags &= ~HC_INDEX_DATA_HC_ENABLED;
4257         flags |= enable_flag;
4258         REG_WR16(bp, addr, flags);
4259         DP(NETIF_MSG_IFUP,
4260            "port %x fw_sb_id %d sb_index %d disable %d\n",
4261            port, fw_sb_id, sb_index, disable);
4262 }
4263
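/* Update the interrupt coalescing timeout for one status block index;
 * coalescing is disabled when requested explicitly or when usec is 0.
 */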
4264 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4265                                     u8 sb_index, u8 disable, u16 usec)
4266 {
4267         int port = BP_PORT(bp);
4268         u8 ticks = usec / BNX2X_BTR;
4269
4270         storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4271
4272         disable = disable ? 1 : (usec ? 0 : 1);
4273         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4274 }