bnx2x: clamp num_queues to prevent passing a negative value
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
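
/* Editor's note (illustrative sketch, not part of the driver): the clamp()
 * above is what the commit subject refers to.  bnx2x_num_queues comes from
 * the "num_queues" module parameter, so a user-supplied negative or zero
 * value must not reach the queue/MSI-X setup code.  Assuming
 * BNX2X_MAX_QUEUES(bp) == 16:
 *
 *   bnx2x_num_queues = -2  ->  clamp(-2, 1, 16) == 1
 *   bnx2x_num_queues =  0  ->  nq = netif_get_num_default_rss_queues(),
 *                              then bounded to at most 16
 *   bnx2x_num_queues = 64  ->  clamp(64, 1, 16) == 16
 *
 * An upper bound alone (e.g. min_t()) would have let a negative value
 * through, which is what the clamp prevents.
 */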

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

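/* Editor's note (illustrative sketch, not part of the driver): the txdata
 * arithmetic in bnx2x_move_fp() only matters for the FCoE fastpath, whose
 * txdata entry sits right after the ethernet ones.  Assuming max_cos == 3
 * and the FCoE fp moving from index 8 down to index 6 (two eth queues fewer
 * than requested), its txdata entry is copied from
 *   8 * 3 + FCOE_TXQ_IDX_OFFSET   to   6 * 3 + FCOE_TXQ_IDX_OFFSET,
 * i.e. the index drops by max_cos * delta, as the comment in the function
 * states.
 */
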
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

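/* Editor's note (illustrative sketch, not part of the driver): with, say,
 * old_eth_num == 8, delta == 2 and max_cos == 3, bnx2x_shrink_eth_fp()
 * repacks the per-CoS txdata entries of the remaining six queues:
 *   cos 1: bnx2x_txq[8 + i]  -> bnx2x_txq[6 + i]
 *   cos 2: bnx2x_txq[16 + i] -> bnx2x_txq[12 + i]
 * keeping the txdata array contiguous after the unallocated queues are
 * dropped.
 */
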
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

9f6c9258
DK
186/* free skb in the packet ring at pos idx
187 * return idx of last bd freed
188 */
6383c0b3 189static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
190 u16 idx, unsigned int *pkts_compl,
191 unsigned int *bytes_compl)
9f6c9258 192{
6383c0b3 193 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
194 struct eth_tx_start_bd *tx_start_bd;
195 struct eth_tx_bd *tx_data_bd;
196 struct sk_buff *skb = tx_buf->skb;
197 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
198 int nbd;
95e92fd4 199 u16 split_bd_len = 0;
9f6c9258
DK
200
201 /* prefetch skb end pointer to speedup dev_kfree_skb() */
202 prefetch(&skb->end);
203
51c1a580 204 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 205 txdata->txq_index, idx, tx_buf, skb);
9f6c9258 206
6383c0b3 207 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258
DK
208
209 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
210#ifdef BNX2X_STOP_ON_ERROR
211 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
212 BNX2X_ERR("BAD nbd!\n");
213 bnx2x_panic();
214 }
215#endif
216 new_cons = nbd + tx_buf->first_bd;
217
218 /* Get the next bd */
219 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
220
221 /* Skip a parse bd... */
222 --nbd;
223 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
224
95e92fd4 225 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
9f6c9258 226 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
95e92fd4
MS
227 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
228 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
9f6c9258
DK
229 --nbd;
230 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
231 }
232
95e92fd4
MS
233 /* unmap first bd */
234 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
235 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
236 DMA_TO_DEVICE);
237
9f6c9258
DK
238 /* now free frags */
239 while (nbd > 0) {
240
6383c0b3 241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
242 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
243 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
244 if (--nbd)
245 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
246 }
247
248 /* release skb */
249 WARN_ON(!skb);
d8290ae5 250 if (likely(skb)) {
2df1a70a
TH
251 (*pkts_compl)++;
252 (*bytes_compl) += skb->len;
253 }
d8290ae5 254
40955532 255 dev_kfree_skb_any(skb);
9f6c9258
DK
256 tx_buf->first_bd = 0;
257 tx_buf->skb = NULL;
258
259 return new_cons;
260}
261
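/* Editor's note (illustrative sketch, not part of the driver): the BD walk
 * in bnx2x_free_tx_pkt() above mirrors how the transmit path builds a
 * packet.  For a hypothetical TSO skb with two page frags the chain looks
 * roughly like:
 *   start BD -> parse BD (skipped, carries no mapping) -> TSO split BD
 *   (same DMA mapping as the start BD, its length folded into the single
 *   dma_unmap_single() via split_bd_len) -> frag BD -> frag BD
 * and new_cons advances past all of them, i.e. by tx_start_bd->nbd.
 */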
6383c0b3 262int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 263{
9f6c9258 264 struct netdev_queue *txq;
6383c0b3 265 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 266 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
267
268#ifdef BNX2X_STOP_ON_ERROR
269 if (unlikely(bp->panic))
270 return -1;
271#endif
272
6383c0b3
AE
273 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
274 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
275 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
276
277 while (sw_cons != hw_cons) {
278 u16 pkt_cons;
279
280 pkt_cons = TX_BD(sw_cons);
281
51c1a580
MS
282 DP(NETIF_MSG_TX_DONE,
283 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 284 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 285
2df1a70a 286 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 287 &pkts_compl, &bytes_compl);
2df1a70a 288
9f6c9258
DK
289 sw_cons++;
290 }
291
2df1a70a
TH
292 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
293
6383c0b3
AE
294 txdata->tx_pkt_cons = sw_cons;
295 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
296
297 /* Need to make the tx_bd_cons update visible to start_xmit()
298 * before checking for netif_tx_queue_stopped(). Without the
299 * memory barrier, there is a small possibility that
300 * start_xmit() will miss it and cause the queue to be stopped
301 * forever.
619c5cb6
VZ
302 * On the other hand we need an rmb() here to ensure the proper
303 * ordering of bit testing in the following
304 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
305 */
306 smp_mb();
307
9f6c9258 308 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 309 /* Taking tx_lock() is needed to prevent re-enabling the queue
9f6c9258
DK
310 * while it's empty. This could have happen if rx_action() gets
311 * suspended in bnx2x_tx_int() after the condition before
312 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
313 *
314 * stops the queue->sees fresh tx_bd_cons->releases the queue->
315 * sends some packets consuming the whole queue again->
316 * stops the queue
317 */
318
319 __netif_tx_lock(txq, smp_processor_id());
320
321 if ((netif_tx_queue_stopped(txq)) &&
322 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 323 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
324 netif_tx_wake_queue(txq);
325
326 __netif_tx_unlock(txq);
327 }
328 return 0;
329}
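/* Editor's note (illustrative, not part of the driver): the smp_mb() in
 * bnx2x_tx_int() above pairs with a corresponding barrier on the
 * bnx2x_start_xmit() side, which stops the queue and only then re-reads the
 * consumer index before deciding whether to wake it.  Without one of the two
 * barriers the "stopped forever" race described in the comment becomes
 * possible.
 */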
330
331static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
332 u16 idx)
333{
334 u16 last_max = fp->last_max_sge;
335
336 if (SUB_S16(idx, last_max) > 0)
337 fp->last_max_sge = idx;
338}
339
621b4d66
DK
340static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
341 u16 sge_len,
342 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
343{
344 struct bnx2x *bp = fp->bp;
9f6c9258
DK
345 u16 last_max, last_elem, first_elem;
346 u16 delta = 0;
347 u16 i;
348
349 if (!sge_len)
350 return;
351
352 /* First mark all used pages */
353 for (i = 0; i < sge_len; i++)
619c5cb6 354 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 355 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
356
357 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 358 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
359
360 /* Here we assume that the last SGE index is the biggest */
361 prefetch((void *)(fp->sge_mask));
523224a3 362 bnx2x_update_last_max_sge(fp,
621b4d66 363 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
364
365 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
366 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
367 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
368
369 /* If ring is not full */
370 if (last_elem + 1 != first_elem)
371 last_elem++;
372
373 /* Now update the prod */
374 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
375 if (likely(fp->sge_mask[i]))
376 break;
377
619c5cb6
VZ
378 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
379 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
380 }
381
382 if (delta > 0) {
383 fp->rx_sge_prod += delta;
384 /* clear page-end entries */
385 bnx2x_clear_sge_mask_next_elems(fp);
386 }
387
388 DP(NETIF_MSG_RX_STATUS,
389 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
390 fp->last_max_sge, fp->rx_sge_prod);
391}
392
2de67439 393/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
394 * CQE (calculated by HW).
395 */
396static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb 397 const struct eth_fast_path_rx_cqe *cqe,
5495ab75 398 enum pkt_hash_types *rxhash_type)
e52fcb24 399{
2de67439 400 /* Get Toeplitz hash from CQE */
e52fcb24 401 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
402 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
403 enum eth_rss_hash_type htype;
404
405 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
5495ab75
TH
406 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
407 (htype == TCP_IPV6_HASH_TYPE)) ?
408 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
409
e52fcb24 410 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb 411 }
5495ab75 412 *rxhash_type = PKT_HASH_TYPE_NONE;
e52fcb24
ED
413 return 0;
414}
415
9f6c9258 416static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 417 u16 cons, u16 prod,
619c5cb6 418 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
419{
420 struct bnx2x *bp = fp->bp;
421 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
422 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
423 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
424 dma_addr_t mapping;
619c5cb6
VZ
425 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
426 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 427
619c5cb6
VZ
428 /* print error if current state != stop */
429 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
430 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
431
e52fcb24 432 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 433 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 434 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
435 fp->rx_buf_size, DMA_FROM_DEVICE);
436 /*
437 * ...if it fails - move the skb from the consumer to the producer
438 * and set the current aggregation state as ERROR to drop it
439 * when TPA_STOP arrives.
440 */
441
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 /* Move the BD from the consumer to the producer */
e52fcb24 444 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
445 tpa_info->tpa_state = BNX2X_TPA_ERROR;
446 return;
447 }
9f6c9258 448
e52fcb24
ED
449 /* move empty data from pool to prod */
450 prod_rx_buf->data = first_buf->data;
619c5cb6 451 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 452 /* point prod_bd to new data */
9f6c9258
DK
453 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
454 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
455
619c5cb6
VZ
456 /* move partial skb from cons to pool (don't unmap yet) */
457 *first_buf = *cons_rx_buf;
458
459 /* mark bin state as START */
460 tpa_info->parsing_flags =
461 le16_to_cpu(cqe->pars_flags.flags);
462 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
463 tpa_info->tpa_state = BNX2X_TPA_START;
464 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
465 tpa_info->placement_offset = cqe->placement_offset;
5495ab75 466 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
621b4d66
DK
467 if (fp->mode == TPA_MODE_GRO) {
468 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 469 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
470 tpa_info->gro_size = gro_size;
471 }
619c5cb6 472
9f6c9258
DK
473#ifdef BNX2X_STOP_ON_ERROR
474 fp->tpa_queue_used |= (1 << queue);
475#ifdef _ASM_GENERIC_INT_L64_H
476 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
477#else
478 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
479#endif
480 fp->tpa_queue_used);
481#endif
482}
483
e4e3c02a
VZ
484/* Timestamp option length allowed for TPA aggregation:
485 *
486 * nop nop kind length echo val
487 */
488#define TPA_TSTAMP_OPT_LEN 12
489/**
cbf1de72 490 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 491 *
cbf1de72 492 * @skb: packet skb
e8920674
DK
493 * @parsing_flags: parsing flags from the START CQE
494 * @len_on_bd: total length of the first packet for the
495 * aggregation.
cbf1de72 496 * @pkt_len: length of all segments
e8920674
DK
497 *
498 * Approximate value of the MSS for this aggregation calculated using
499 * the first packet of it.
2de67439 500 * Compute number of aggregated segments, and gso_type.
e4e3c02a 501 */
cbf1de72 502static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
503 u16 len_on_bd, unsigned int pkt_len,
504 u16 num_of_coalesced_segs)
e4e3c02a 505{
cbf1de72 506 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 507 * other than timestamp or IPv6 extension headers.
e4e3c02a 508 */
619c5cb6
VZ
509 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
510
511 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 512 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 513 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
514 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
515 } else {
619c5cb6 516 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
517 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
518 }
e4e3c02a
VZ
519
520 /* Check if there was a TCP timestamp, if there is it's will
521 * always be 12 bytes length: nop nop kind length echo val.
522 *
523 * Otherwise FW would close the aggregation.
524 */
525 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
526 hdrs_len += TPA_TSTAMP_OPT_LEN;
527
cbf1de72
YM
528 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
529
530 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
531 * to skb_shinfo(skb)->gso_segs
532 */
ab5777d7 533 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
534}
535
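/* Editor's note (illustrative, not part of the driver): a worked example of
 * the MSS approximation in bnx2x_set_gro_params() above.  For an IPv4
 * aggregation whose first frame carried TCP timestamps:
 *   hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr) (20)
 *            + sizeof(struct iphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66
 *   gso_size = len_on_bd - 66
 * which is only an approximation of the real MSS, as the function comment
 * notes, but is sufficient for tcp_gro_complete().
 */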
996dedba
MS
536static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
537 u16 index, gfp_t gfp_mask)
1191cb83 538{
996dedba 539 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
1191cb83
ED
540 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
541 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
542 dma_addr_t mapping;
543
544 if (unlikely(page == NULL)) {
545 BNX2X_ERR("Can't alloc sge\n");
546 return -ENOMEM;
547 }
548
549 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 550 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
551 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
552 __free_pages(page, PAGES_PER_SGE_SHIFT);
553 BNX2X_ERR("Can't map sge\n");
554 return -ENOMEM;
555 }
556
557 sw_buf->page = page;
558 dma_unmap_addr_set(sw_buf, mapping, mapping);
559
560 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
561 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
562
563 return 0;
564}
565
9f6c9258 566static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
567 struct bnx2x_agg_info *tpa_info,
568 u16 pages,
569 struct sk_buff *skb,
619c5cb6
VZ
570 struct eth_end_agg_rx_cqe *cqe,
571 u16 cqe_idx)
9f6c9258
DK
572{
573 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
574 u32 i, frag_len, frag_size;
575 int err, j, frag_id = 0;
619c5cb6 576 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 577 u16 full_page = 0, gro_size = 0;
9f6c9258 578
619c5cb6 579 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
580
581 if (fp->mode == TPA_MODE_GRO) {
582 gro_size = tpa_info->gro_size;
583 full_page = tpa_info->full_page;
584 }
9f6c9258
DK
585
586 /* This is needed in order to enable forwarding support */
cbf1de72
YM
587 if (frag_size)
588 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
589 le16_to_cpu(cqe->pkt_len),
590 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 591
9f6c9258 592#ifdef BNX2X_STOP_ON_ERROR
924d75ab 593 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
594 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
595 pages, cqe_idx);
619c5cb6 596 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
597 bnx2x_panic();
598 return -EINVAL;
599 }
600#endif
601
602 /* Run through the SGL and compose the fragmented skb */
603 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 604 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
605
606 /* FW gives the indices of the SGE as if the ring is an array
607 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
608 if (fp->mode == TPA_MODE_GRO)
609 frag_len = min_t(u32, frag_size, (u32)full_page);
610 else /* LRO */
924d75ab 611 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 612
9f6c9258
DK
613 rx_pg = &fp->rx_page_ring[sge_idx];
614 old_rx_pg = *rx_pg;
615
616 /* If we fail to allocate a substitute page, we simply stop
617 where we are and drop the whole packet */
996dedba 618 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 619 if (unlikely(err)) {
15192a8c 620 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
621 return err;
622 }
623
16a5fd92 624 /* Unmap the page as we're going to pass it to the stack */
9f6c9258
DK
625 dma_unmap_page(&bp->pdev->dev,
626 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 627 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 628 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
629 if (fp->mode == TPA_MODE_LRO)
630 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
631 else { /* GRO */
632 int rem;
633 int offset = 0;
634 for (rem = frag_len; rem > 0; rem -= gro_size) {
635 int len = rem > gro_size ? gro_size : rem;
636 skb_fill_page_desc(skb, frag_id++,
637 old_rx_pg.page, offset, len);
638 if (offset)
639 get_page(old_rx_pg.page);
640 offset += len;
641 }
642 }
9f6c9258
DK
643
644 skb->data_len += frag_len;
924d75ab 645 skb->truesize += SGE_PAGES;
9f6c9258
DK
646 skb->len += frag_len;
647
648 frag_size -= frag_len;
649 }
650
651 return 0;
652}
653
d46d132c
ED
654static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
655{
656 if (fp->rx_frag_size)
657 put_page(virt_to_head_page(data));
658 else
659 kfree(data);
660}
661
996dedba 662static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 663{
996dedba
MS
664 if (fp->rx_frag_size) {
665 /* GFP_KERNEL allocations are used only during initialization */
666 if (unlikely(gfp_mask & __GFP_WAIT))
667 return (void *)__get_free_page(gfp_mask);
668
d46d132c 669 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 670 }
d46d132c 671
996dedba 672 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
d46d132c
ED
673}
674
9969085e
YM
675#ifdef CONFIG_INET
676static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
677{
678 const struct iphdr *iph = ip_hdr(skb);
679 struct tcphdr *th;
680
681 skb_set_transport_header(skb, sizeof(struct iphdr));
682 th = tcp_hdr(skb);
683
684 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
685 iph->saddr, iph->daddr, 0);
686}
687
688static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
689{
690 struct ipv6hdr *iph = ipv6_hdr(skb);
691 struct tcphdr *th;
692
693 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
694 th = tcp_hdr(skb);
695
696 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
697 &iph->saddr, &iph->daddr, 0);
698}
2c2d06d5
YM
699
700static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
701 void (*gro_func)(struct bnx2x*, struct sk_buff*))
702{
703 skb_set_network_header(skb, 0);
704 gro_func(bp, skb);
705 tcp_gro_complete(skb);
706}
9969085e
YM
707#endif
708
709static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
710 struct sk_buff *skb)
711{
712#ifdef CONFIG_INET
cbf1de72 713 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
714 switch (be16_to_cpu(skb->protocol)) {
715 case ETH_P_IP:
2c2d06d5 716 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
717 break;
718 case ETH_P_IPV6:
2c2d06d5 719 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
720 break;
721 default:
2c2d06d5 722 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
723 be16_to_cpu(skb->protocol));
724 }
9969085e
YM
725 }
726#endif
60e66fee 727 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
728 napi_gro_receive(&fp->napi, skb);
729}
730
1191cb83
ED
731static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
732 struct bnx2x_agg_info *tpa_info,
733 u16 pages,
734 struct eth_end_agg_rx_cqe *cqe,
735 u16 cqe_idx)
9f6c9258 736{
619c5cb6 737 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 738 u8 pad = tpa_info->placement_offset;
619c5cb6 739 u16 len = tpa_info->len_on_bd;
e52fcb24 740 struct sk_buff *skb = NULL;
621b4d66 741 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
742 u8 old_tpa_state = tpa_info->tpa_state;
743
744 tpa_info->tpa_state = BNX2X_TPA_STOP;
745
746 /* If we there was an error during the handling of the TPA_START -
747 * drop this aggregation.
748 */
749 if (old_tpa_state == BNX2X_TPA_ERROR)
750 goto drop;
751
e52fcb24 752 /* Try to allocate the new data */
996dedba 753 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
754 /* Unmap skb in the pool anyway, as we are going to change
755 pool entry status to BNX2X_TPA_STOP even if new skb allocation
756 fails. */
757 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 758 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 759 if (likely(new_data))
d46d132c 760 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 761
e52fcb24 762 if (likely(skb)) {
9f6c9258 763#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 764 if (pad + len > fp->rx_buf_size) {
51c1a580 765 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 766 pad, len, fp->rx_buf_size);
9f6c9258
DK
767 bnx2x_panic();
768 return;
769 }
770#endif
771
e52fcb24 772 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 773 skb_put(skb, len);
5495ab75 774 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
9f6c9258
DK
775
776 skb->protocol = eth_type_trans(skb, bp->dev);
777 skb->ip_summed = CHECKSUM_UNNECESSARY;
778
621b4d66
DK
779 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
780 skb, cqe, cqe_idx)) {
619c5cb6 781 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 782 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 783 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 784 } else {
51c1a580
MS
785 DP(NETIF_MSG_RX_STATUS,
786 "Failed to allocate new pages - dropping packet!\n");
40955532 787 dev_kfree_skb_any(skb);
9f6c9258
DK
788 }
789
e52fcb24
ED
790 /* put new data in bin */
791 rx_buf->data = new_data;
9f6c9258 792
619c5cb6 793 return;
9f6c9258 794 }
d46d132c 795 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
796drop:
797 /* drop the packet and keep the buffer in the bin */
798 DP(NETIF_MSG_RX_STATUS,
799 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 800 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
801}
802
996dedba
MS
803static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
804 u16 index, gfp_t gfp_mask)
1191cb83
ED
805{
806 u8 *data;
807 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
808 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
809 dma_addr_t mapping;
810
996dedba 811 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
812 if (unlikely(data == NULL))
813 return -ENOMEM;
814
815 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
816 fp->rx_buf_size,
817 DMA_FROM_DEVICE);
818 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 819 bnx2x_frag_free(fp, data);
1191cb83
ED
820 BNX2X_ERR("Can't map rx data\n");
821 return -ENOMEM;
822 }
823
824 rx_buf->data = data;
825 dma_unmap_addr_set(rx_buf, mapping, mapping);
826
827 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
828 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
829
830 return 0;
831}
832
15192a8c
BW
833static
834void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
835 struct bnx2x_fastpath *fp,
836 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 837{
e488921f
MS
838 /* Do nothing if no L4 csum validation was done.
839 * We do not check whether IP csum was validated. For IPv4 we assume
840 * that if the card got as far as validating the L4 csum, it also
841 * validated the IP csum. IPv6 has no IP csum.
842 */
d6cb3e41 843 if (cqe->fast_path_cqe.status_flags &
e488921f 844 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
845 return;
846
e488921f 847 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
848
849 if (cqe->fast_path_cqe.type_error_flags &
850 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
851 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 852 qstats->hw_csum_err++;
d6cb3e41
ED
853 else
854 skb->ip_summed = CHECKSUM_UNNECESSARY;
855}
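/* Editor's note (illustrative, not part of the driver): the outcome of
 * bnx2x_csum_validate() above, summarized:
 *   L4 csum not validated by HW      -> skb stays CHECKSUM_NONE
 *   validated, IP or L4 csum error   -> qstats->hw_csum_err++, CHECKSUM_NONE
 *   validated, no error              -> CHECKSUM_UNNECESSARY
 * i.e. the stack only skips software verification when the hardware both
 * looked at the L4 checksum and found it good.
 */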
9f6c9258 856
a8f47eb7 857static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
9f6c9258
DK
858{
859 struct bnx2x *bp = fp->bp;
860 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 861 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 862 int rx_pkt = 0;
75b29459
DK
863 union eth_rx_cqe *cqe;
864 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
865
866#ifdef BNX2X_STOP_ON_ERROR
867 if (unlikely(bp->panic))
868 return 0;
869#endif
870
9f6c9258
DK
871 bd_cons = fp->rx_bd_cons;
872 bd_prod = fp->rx_bd_prod;
873 bd_prod_fw = bd_prod;
874 sw_comp_cons = fp->rx_comp_cons;
875 sw_comp_prod = fp->rx_comp_prod;
876
75b29459
DK
877 comp_ring_cons = RCQ_BD(sw_comp_cons);
878 cqe = &fp->rx_comp_ring[comp_ring_cons];
879 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
880
881 DP(NETIF_MSG_RX_STATUS,
75b29459 882 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 883
75b29459 884 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
885 struct sw_rx_bd *rx_buf = NULL;
886 struct sk_buff *skb;
9f6c9258 887 u8 cqe_fp_flags;
619c5cb6 888 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 889 u16 len, pad, queue;
e52fcb24 890 u8 *data;
bd5cef03 891 u32 rxhash;
5495ab75 892 enum pkt_hash_types rxhash_type;
9f6c9258 893
619c5cb6
VZ
894#ifdef BNX2X_STOP_ON_ERROR
895 if (unlikely(bp->panic))
896 return 0;
897#endif
898
9f6c9258
DK
899 bd_prod = RX_BD(bd_prod);
900 bd_cons = RX_BD(bd_cons);
901
619c5cb6
VZ
902 cqe_fp_flags = cqe_fp->type_error_flags;
903 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 904
51c1a580
MS
905 DP(NETIF_MSG_RX_STATUS,
906 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
907 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
908 cqe_fp_flags, cqe_fp->status_flags,
909 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
910 le16_to_cpu(cqe_fp->vlan_tag),
911 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
912
913 /* is this a slowpath msg? */
619c5cb6 914 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
915 bnx2x_sp_event(fp, cqe);
916 goto next_cqe;
e52fcb24 917 }
621b4d66 918
e52fcb24
ED
919 rx_buf = &fp->rx_buf_ring[bd_cons];
920 data = rx_buf->data;
9f6c9258 921
e52fcb24 922 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
923 struct bnx2x_agg_info *tpa_info;
924 u16 frag_size, pages;
619c5cb6 925#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
926 /* sanity check */
927 if (fp->disable_tpa &&
928 (CQE_TYPE_START(cqe_fp_type) ||
929 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 930 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 931 CQE_TYPE(cqe_fp_type));
619c5cb6 932#endif
9f6c9258 933
e52fcb24
ED
934 if (CQE_TYPE_START(cqe_fp_type)) {
935 u16 queue = cqe_fp->queue_index;
936 DP(NETIF_MSG_RX_STATUS,
937 "calling tpa_start on queue %d\n",
938 queue);
9f6c9258 939
e52fcb24
ED
940 bnx2x_tpa_start(fp, queue,
941 bd_cons, bd_prod,
942 cqe_fp);
621b4d66 943
e52fcb24 944 goto next_rx;
621b4d66
DK
945 }
946 queue = cqe->end_agg_cqe.queue_index;
947 tpa_info = &fp->tpa_info[queue];
948 DP(NETIF_MSG_RX_STATUS,
949 "calling tpa_stop on queue %d\n",
950 queue);
951
952 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
953 tpa_info->len_on_bd;
954
955 if (fp->mode == TPA_MODE_GRO)
956 pages = (frag_size + tpa_info->full_page - 1) /
957 tpa_info->full_page;
958 else
959 pages = SGE_PAGE_ALIGN(frag_size) >>
960 SGE_PAGE_SHIFT;
961
962 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
963 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 964#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
965 if (bp->panic)
966 return 0;
9f6c9258
DK
967#endif
968
621b4d66
DK
969 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
970 goto next_cqe;
e52fcb24
ED
971 }
972 /* non TPA */
621b4d66 973 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
974 pad = cqe_fp->placement_offset;
975 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 976 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
977 pad + RX_COPY_THRESH,
978 DMA_FROM_DEVICE);
979 pad += NET_SKB_PAD;
980 prefetch(data + pad); /* speedup eth_type_trans() */
981 /* is this an error packet? */
982 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 983 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
984 "ERROR flags %x rx packet %u\n",
985 cqe_fp_flags, sw_comp_cons);
15192a8c 986 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
987 goto reuse_rx;
988 }
9f6c9258 989
e52fcb24
ED
990 /* Since we don't have a jumbo ring
991 * copy small packets if mtu > 1500
992 */
993 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
994 (len <= RX_COPY_THRESH)) {
995 skb = netdev_alloc_skb_ip_align(bp->dev, len);
996 if (skb == NULL) {
51c1a580 997 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 998 "ERROR packet dropped because of alloc failure\n");
15192a8c 999 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
1000 goto reuse_rx;
1001 }
e52fcb24
ED
1002 memcpy(skb->data, data + pad, len);
1003 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1004 } else {
996dedba
MS
1005 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1006 GFP_ATOMIC) == 0)) {
9f6c9258 1007 dma_unmap_single(&bp->pdev->dev,
e52fcb24 1008 dma_unmap_addr(rx_buf, mapping),
a8c94b91 1009 fp->rx_buf_size,
9f6c9258 1010 DMA_FROM_DEVICE);
d46d132c 1011 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 1012 if (unlikely(!skb)) {
d46d132c 1013 bnx2x_frag_free(fp, data);
15192a8c
BW
1014 bnx2x_fp_qstats(bp, fp)->
1015 rx_skb_alloc_failed++;
e52fcb24
ED
1016 goto next_rx;
1017 }
9f6c9258 1018 skb_reserve(skb, pad);
9f6c9258 1019 } else {
51c1a580
MS
1020 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021 "ERROR packet dropped because of alloc failure\n");
15192a8c 1022 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 1023reuse_rx:
e52fcb24 1024 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
1025 goto next_rx;
1026 }
036d2df9 1027 }
9f6c9258 1028
036d2df9
DK
1029 skb_put(skb, len);
1030 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 1031
036d2df9 1032 /* Set Toeplitz hash for a none-LRO skb */
5495ab75
TH
1033 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1034 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 1035
036d2df9 1036 skb_checksum_none_assert(skb);
f85582f8 1037
d6cb3e41 1038 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
1039 bnx2x_csum_validate(skb, cqe, fp,
1040 bnx2x_fp_qstats(bp, fp));
9f6c9258 1041
f233cafe 1042 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1043
619c5cb6
VZ
1044 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1045 PARSING_FLAGS_VLAN)
86a9bad3 1046 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1047 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1048
8b80cda5 1049 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1050
1051 if (bnx2x_fp_ll_polling(fp))
1052 netif_receive_skb(skb);
1053 else
1054 napi_gro_receive(&fp->napi, skb);
9f6c9258 1055next_rx:
e52fcb24 1056 rx_buf->data = NULL;
9f6c9258
DK
1057
1058 bd_cons = NEXT_RX_IDX(bd_cons);
1059 bd_prod = NEXT_RX_IDX(bd_prod);
1060 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1061 rx_pkt++;
1062next_cqe:
1063 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1064 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1065
75b29459
DK
1066 /* mark CQE as free */
1067 BNX2X_SEED_CQE(cqe_fp);
1068
9f6c9258
DK
1069 if (rx_pkt == budget)
1070 break;
75b29459
DK
1071
1072 comp_ring_cons = RCQ_BD(sw_comp_cons);
1073 cqe = &fp->rx_comp_ring[comp_ring_cons];
1074 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1075 } /* while */
1076
1077 fp->rx_bd_cons = bd_cons;
1078 fp->rx_bd_prod = bd_prod_fw;
1079 fp->rx_comp_cons = sw_comp_cons;
1080 fp->rx_comp_prod = sw_comp_prod;
1081
1082 /* Update producers */
1083 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1084 fp->rx_sge_prod);
1085
1086 fp->rx_pkt += rx_pkt;
1087 fp->rx_calls++;
1088
1089 return rx_pkt;
1090}
1091
1092static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1093{
1094 struct bnx2x_fastpath *fp = fp_cookie;
1095 struct bnx2x *bp = fp->bp;
6383c0b3 1096 u8 cos;
9f6c9258 1097
51c1a580
MS
1098 DP(NETIF_MSG_INTR,
1099 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1100 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1101
523224a3 1102 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1103
1104#ifdef BNX2X_STOP_ON_ERROR
1105 if (unlikely(bp->panic))
1106 return IRQ_HANDLED;
1107#endif
1108
1109 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1110 for_each_cos_in_tx_queue(fp, cos)
65565884 1111 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1112
523224a3 1113 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1114 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1115
1116 return IRQ_HANDLED;
1117}
1118
9f6c9258
DK
1119/* HW Lock for shared dual port PHYs */
1120void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1121{
1122 mutex_lock(&bp->port.phy_mutex);
1123
8203c4b6 1124 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1125}
1126
1127void bnx2x_release_phy_lock(struct bnx2x *bp)
1128{
8203c4b6 1129 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1130
1131 mutex_unlock(&bp->port.phy_mutex);
1132}
1133
0793f83f
DK
1134/* calculates MF speed according to current linespeed and MF configuration */
1135u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1136{
1137 u16 line_speed = bp->link_vars.line_speed;
1138 if (IS_MF(bp)) {
faa6fcbb
DK
1139 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1140 bp->mf_config[BP_VN(bp)]);
1141
1142 /* Calculate the current MAX line speed limit for the MF
1143 * devices
0793f83f 1144 */
faa6fcbb
DK
1145 if (IS_MF_SI(bp))
1146 line_speed = (line_speed * maxCfg) / 100;
1147 else { /* SD mode */
0793f83f
DK
1148 u16 vn_max_rate = maxCfg * 100;
1149
1150 if (vn_max_rate < line_speed)
1151 line_speed = vn_max_rate;
faa6fcbb 1152 }
0793f83f
DK
1153 }
1154
1155 return line_speed;
1156}
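/* Editor's note (illustrative, not part of the driver): example of the MF
 * speed calculation above with a 10000 Mbps physical link and maxCfg == 25:
 *   MF-SI: line_speed = 10000 * 25 / 100            = 2500 Mbps
 *   MF-SD: vn_max_rate = 25 * 100 = 2500 Mbps, min(10000, 2500) = 2500 Mbps
 * so maxCfg is interpreted as a percentage in SI mode and in units of
 * 100 Mbps in SD mode.
 */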
1157
2ae17f66
VZ
1158/**
1159 * bnx2x_fill_report_data - fill link report data to report
1160 *
1161 * @bp: driver handle
1162 * @data: link state to update
1163 *
1164 * It uses a none-atomic bit operations because is called under the mutex.
1165 */
1191cb83
ED
1166static void bnx2x_fill_report_data(struct bnx2x *bp,
1167 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1168{
1169 u16 line_speed = bnx2x_get_mf_speed(bp);
1170
1171 memset(data, 0, sizeof(*data));
1172
16a5fd92 1173 /* Fill the report data: effective line speed */
2ae17f66
VZ
1174 data->line_speed = line_speed;
1175
1176 /* Link is down */
1177 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1178 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1179 &data->link_report_flags);
1180
1181 /* Full DUPLEX */
1182 if (bp->link_vars.duplex == DUPLEX_FULL)
1183 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1184
1185 /* Rx Flow Control is ON */
1186 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1187 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1188
1189 /* Tx Flow Control is ON */
1190 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1191 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1192}
1193
1194/**
1195 * bnx2x_link_report - report link status to OS.
1196 *
1197 * @bp: driver handle
1198 *
1199 * Calls the __bnx2x_link_report() under the same locking scheme
1200 * as a link/PHY state managing code to ensure a consistent link
1201 * reporting.
1202 */
1203
9f6c9258
DK
1204void bnx2x_link_report(struct bnx2x *bp)
1205{
2ae17f66
VZ
1206 bnx2x_acquire_phy_lock(bp);
1207 __bnx2x_link_report(bp);
1208 bnx2x_release_phy_lock(bp);
1209}
9f6c9258 1210
2ae17f66
VZ
1211/**
1212 * __bnx2x_link_report - report link status to OS.
1213 *
1214 * @bp: driver handle
1215 *
16a5fd92 1216 * None atomic implementation.
2ae17f66
VZ
1217 * Should be called under the phy_lock.
1218 */
1219void __bnx2x_link_report(struct bnx2x *bp)
1220{
1221 struct bnx2x_link_report_data cur_data;
9f6c9258 1222
2ae17f66 1223 /* reread mf_cfg */
ad5afc89 1224 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1225 bnx2x_read_mf_cfg(bp);
1226
1227 /* Read the current link report info */
1228 bnx2x_fill_report_data(bp, &cur_data);
1229
1230 /* Don't report link down or exactly the same link status twice */
1231 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1232 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1233 &bp->last_reported_link.link_report_flags) &&
1234 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1235 &cur_data.link_report_flags)))
1236 return;
1237
1238 bp->link_cnt++;
9f6c9258 1239
2ae17f66
VZ
1240 /* We are going to report a new link parameters now -
1241 * remember the current data for the next time.
1242 */
1243 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1244
2ae17f66
VZ
1245 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1246 &cur_data.link_report_flags)) {
1247 netif_carrier_off(bp->dev);
1248 netdev_err(bp->dev, "NIC Link is Down\n");
1249 return;
1250 } else {
94f05b0f
JP
1251 const char *duplex;
1252 const char *flow;
1253
2ae17f66 1254 netif_carrier_on(bp->dev);
9f6c9258 1255
2ae17f66
VZ
1256 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1257 &cur_data.link_report_flags))
94f05b0f 1258 duplex = "full";
9f6c9258 1259 else
94f05b0f 1260 duplex = "half";
9f6c9258 1261
2ae17f66
VZ
1262 /* Handle the FC at the end so that only these flags would be
1263 * possibly set. This way we may easily check if there is no FC
1264 * enabled.
1265 */
1266 if (cur_data.link_report_flags) {
1267 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1268 &cur_data.link_report_flags)) {
2ae17f66
VZ
1269 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1270 &cur_data.link_report_flags))
94f05b0f
JP
1271 flow = "ON - receive & transmit";
1272 else
1273 flow = "ON - receive";
9f6c9258 1274 } else {
94f05b0f 1275 flow = "ON - transmit";
9f6c9258 1276 }
94f05b0f
JP
1277 } else {
1278 flow = "none";
9f6c9258 1279 }
94f05b0f
JP
1280 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1281 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1282 }
1283}
1284
1191cb83
ED
1285static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1286{
1287 int i;
1288
1289 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1290 struct eth_rx_sge *sge;
1291
1292 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1293 sge->addr_hi =
1294 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1295 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1296
1297 sge->addr_lo =
1298 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1299 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1300 }
1301}
1302
1303static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1304 struct bnx2x_fastpath *fp, int last)
1305{
1306 int i;
1307
1308 for (i = 0; i < last; i++) {
1309 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1310 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1311 u8 *data = first_buf->data;
1312
1313 if (data == NULL) {
1314 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1315 continue;
1316 }
1317 if (tpa_info->tpa_state == BNX2X_TPA_START)
1318 dma_unmap_single(&bp->pdev->dev,
1319 dma_unmap_addr(first_buf, mapping),
1320 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1321 bnx2x_frag_free(fp, data);
1191cb83
ED
1322 first_buf->data = NULL;
1323 }
1324}
1325
55c11941
MS
1326void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1327{
1328 int j;
1329
1330 for_each_rx_queue_cnic(bp, j) {
1331 struct bnx2x_fastpath *fp = &bp->fp[j];
1332
1333 fp->rx_bd_cons = 0;
1334
1335 /* Activate BD ring */
1336 /* Warning!
1337 * this will generate an interrupt (to the TSTORM)
1338 * must only be done after chip is initialized
1339 */
1340 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1341 fp->rx_sge_prod);
1342 }
1343}
1344
9f6c9258
DK
1345void bnx2x_init_rx_rings(struct bnx2x *bp)
1346{
1347 int func = BP_FUNC(bp);
523224a3 1348 u16 ring_prod;
9f6c9258 1349 int i, j;
25141580 1350
b3b83c3f 1351 /* Allocate TPA resources */
55c11941 1352 for_each_eth_queue(bp, j) {
523224a3 1353 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1354
a8c94b91
VZ
1355 DP(NETIF_MSG_IFUP,
1356 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1357
523224a3 1358 if (!fp->disable_tpa) {
16a5fd92 1359 /* Fill the per-aggregation pool */
dfacf138 1360 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1361 struct bnx2x_agg_info *tpa_info =
1362 &fp->tpa_info[i];
1363 struct sw_rx_bd *first_buf =
1364 &tpa_info->first_buf;
1365
996dedba
MS
1366 first_buf->data =
1367 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1368 if (!first_buf->data) {
51c1a580
MS
1369 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1370 j);
9f6c9258
DK
1371 bnx2x_free_tpa_pool(bp, fp, i);
1372 fp->disable_tpa = 1;
1373 break;
1374 }
619c5cb6
VZ
1375 dma_unmap_addr_set(first_buf, mapping, 0);
1376 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1377 }
523224a3
DK
1378
1379 /* "next page" elements initialization */
1380 bnx2x_set_next_page_sgl(fp);
1381
1382 /* set SGEs bit mask */
1383 bnx2x_init_sge_ring_bit_mask(fp);
1384
1385 /* Allocate SGEs and initialize the ring elements */
1386 for (i = 0, ring_prod = 0;
1387 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1388
996dedba
MS
1389 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1390 GFP_KERNEL) < 0) {
51c1a580
MS
1391 BNX2X_ERR("was only able to allocate %d rx sges\n",
1392 i);
1393 BNX2X_ERR("disabling TPA for queue[%d]\n",
1394 j);
523224a3 1395 /* Cleanup already allocated elements */
619c5cb6
VZ
1396 bnx2x_free_rx_sge_range(bp, fp,
1397 ring_prod);
1398 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1399 MAX_AGG_QS(bp));
523224a3
DK
1400 fp->disable_tpa = 1;
1401 ring_prod = 0;
1402 break;
1403 }
1404 ring_prod = NEXT_SGE_IDX(ring_prod);
1405 }
1406
1407 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1408 }
1409 }
1410
55c11941 1411 for_each_eth_queue(bp, j) {
9f6c9258
DK
1412 struct bnx2x_fastpath *fp = &bp->fp[j];
1413
1414 fp->rx_bd_cons = 0;
9f6c9258 1415
b3b83c3f
DK
1416 /* Activate BD ring */
1417 /* Warning!
1418 * this will generate an interrupt (to the TSTORM)
1419 * must only be done after chip is initialized
1420 */
1421 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1422 fp->rx_sge_prod);
9f6c9258 1423
9f6c9258
DK
1424 if (j != 0)
1425 continue;
1426
619c5cb6 1427 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1428 REG_WR(bp, BAR_USTRORM_INTMEM +
1429 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1430 U64_LO(fp->rx_comp_mapping));
1431 REG_WR(bp, BAR_USTRORM_INTMEM +
1432 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1433 U64_HI(fp->rx_comp_mapping));
1434 }
9f6c9258
DK
1435 }
1436}
f85582f8 1437
55c11941 1438static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1439{
6383c0b3 1440 u8 cos;
55c11941 1441 struct bnx2x *bp = fp->bp;
9f6c9258 1442
55c11941
MS
1443 for_each_cos_in_tx_queue(fp, cos) {
1444 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1445 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1446
55c11941
MS
1447 u16 sw_prod = txdata->tx_pkt_prod;
1448 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1449
55c11941
MS
1450 while (sw_cons != sw_prod) {
1451 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1452 &pkts_compl, &bytes_compl);
1453 sw_cons++;
9f6c9258 1454 }
55c11941
MS
1455
1456 netdev_tx_reset_queue(
1457 netdev_get_tx_queue(bp->dev,
1458 txdata->txq_index));
1459 }
1460}
1461
1462static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1463{
1464 int i;
1465
1466 for_each_tx_queue_cnic(bp, i) {
1467 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1468 }
1469}
1470
1471static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1472{
1473 int i;
1474
1475 for_each_eth_queue(bp, i) {
1476 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1477 }
1478}
1479
b3b83c3f
DK
1480static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1481{
1482 struct bnx2x *bp = fp->bp;
1483 int i;
1484
1485 /* ring wasn't allocated */
1486 if (fp->rx_buf_ring == NULL)
1487 return;
1488
1489 for (i = 0; i < NUM_RX_BD; i++) {
1490 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1491 u8 *data = rx_buf->data;
b3b83c3f 1492
e52fcb24 1493 if (data == NULL)
b3b83c3f 1494 continue;
b3b83c3f
DK
1495 dma_unmap_single(&bp->pdev->dev,
1496 dma_unmap_addr(rx_buf, mapping),
1497 fp->rx_buf_size, DMA_FROM_DEVICE);
1498
e52fcb24 1499 rx_buf->data = NULL;
d46d132c 1500 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1501 }
1502}
1503
55c11941
MS
1504static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1505{
1506 int j;
1507
1508 for_each_rx_queue_cnic(bp, j) {
1509 bnx2x_free_rx_bds(&bp->fp[j]);
1510 }
1511}
1512
9f6c9258
DK
1513static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1514{
b3b83c3f 1515 int j;
9f6c9258 1516
55c11941 1517 for_each_eth_queue(bp, j) {
9f6c9258
DK
1518 struct bnx2x_fastpath *fp = &bp->fp[j];
1519
b3b83c3f 1520 bnx2x_free_rx_bds(fp);
9f6c9258 1521
9f6c9258 1522 if (!fp->disable_tpa)
dfacf138 1523 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1524 }
1525}
1526
a8f47eb7 1527static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
55c11941
MS
1528{
1529 bnx2x_free_tx_skbs_cnic(bp);
1530 bnx2x_free_rx_skbs_cnic(bp);
1531}
1532
9f6c9258
DK
1533void bnx2x_free_skbs(struct bnx2x *bp)
1534{
1535 bnx2x_free_tx_skbs(bp);
1536 bnx2x_free_rx_skbs(bp);
1537}
1538
e3835b99
DK
1539void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1540{
1541 /* load old values */
1542 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1543
1544 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1545 /* leave all but MAX value */
1546 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1547
1548 /* set new MAX value */
1549 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1550 & FUNC_MF_CFG_MAX_BW_MASK;
1551
1552 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1553 }
1554}
1555
ca92429f
DK
1556/**
1557 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1558 *
1559 * @bp: driver handle
1560 * @nvecs: number of vectors to be released
1561 */
1562static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1563{
ca92429f 1564 int i, offset = 0;
9f6c9258 1565
ca92429f
DK
1566 if (nvecs == offset)
1567 return;
ad5afc89
AE
1568
1569 /* VFs don't have a default SB */
1570 if (IS_PF(bp)) {
1571 free_irq(bp->msix_table[offset].vector, bp->dev);
1572 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1573 bp->msix_table[offset].vector);
1574 offset++;
1575 }
55c11941
MS
1576
1577 if (CNIC_SUPPORT(bp)) {
1578 if (nvecs == offset)
1579 return;
1580 offset++;
1581 }
ca92429f 1582
ec6ba945 1583 for_each_eth_queue(bp, i) {
ca92429f
DK
1584 if (nvecs == offset)
1585 return;
51c1a580
MS
1586 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1587 i, bp->msix_table[offset].vector);
9f6c9258 1588
ca92429f 1589 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1590 }
1591}
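/* Editor's note (illustrative, not part of the driver): MSI-X vector layout
 * assumed by bnx2x_free_msix_irqs() above, for a PF with CNIC support and
 * four ethernet queues (nvecs == 6):
 *   offset 0     - slowpath / default status block (freed here)
 *   offset 1     - CNIC vector (only skipped over here, not freed)
 *   offset 2..5  - one vector per ethernet fastpath (freed here)
 * A VF has no default status block, so its table starts directly at the
 * CNIC/fastpath entries.
 */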
1592
d6214d7a 1593void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1594{
30a5de77 1595 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1596 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1597 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1598
1599 /* vfs don't have a default status block */
1600 if (IS_PF(bp))
1601 nvecs++;
1602
1603 bnx2x_free_msix_irqs(bp, nvecs);
1604 } else {
30a5de77 1605 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1606 }
9f6c9258
DK
1607}
1608
0e8d2ec5 1609int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1610{
1ab4434c 1611 int msix_vec = 0, i, rc;
9f6c9258 1612
1ab4434c
AE
1613 /* VFs don't have a default status block */
1614 if (IS_PF(bp)) {
1615 bp->msix_table[msix_vec].entry = msix_vec;
1616 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1617 bp->msix_table[0].entry);
1618 msix_vec++;
1619 }
9f6c9258 1620
55c11941
MS
1621 /* Cnic requires an msix vector for itself */
1622 if (CNIC_SUPPORT(bp)) {
1623 bp->msix_table[msix_vec].entry = msix_vec;
1624 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1625 msix_vec, bp->msix_table[msix_vec].entry);
1626 msix_vec++;
1627 }
1628
6383c0b3 1629 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1630 for_each_eth_queue(bp, i) {
d6214d7a 1631 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1632 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1633 msix_vec, msix_vec, i);
d6214d7a 1634 msix_vec++;
9f6c9258
DK
1635 }
1636
1ab4434c
AE
1637 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1638 msix_vec);
d6214d7a 1639
a5444b17
AG
1640 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1641 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
9f6c9258
DK
1642 /*
1643 * reconfigure number of tx/rx queues according to available
1644 * MSI-X vectors
1645 */
a5444b17 1646 if (rc == -ENOSPC) {
30a5de77 1647 /* Get by with single vector */
a5444b17
AG
1648 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1649 if (rc < 0) {
30a5de77
DK
1650 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1651 rc);
1652 goto no_msix;
1653 }
1654
1655 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1656 bp->flags |= USING_SINGLE_MSIX_FLAG;
1657
55c11941
MS
1658 BNX2X_DEV_INFO("set number of queues to 1\n");
1659 bp->num_ethernet_queues = 1;
1660 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1661 } else if (rc < 0) {
a5444b17 1662 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1663 goto no_msix;
a5444b17
AG
1664 } else if (rc < msix_vec) {
1665 /* how less vectors we will have? */
1666 int diff = msix_vec - rc;
1667
1668 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1669
1670 /*
1671 * decrease number of queues by number of unallocated entries
1672 */
1673 bp->num_ethernet_queues -= diff;
1674 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1675
1676 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1677 bp->num_queues);
9f6c9258
DK
1678 }
1679
1680 bp->flags |= USING_MSIX_FLAG;
1681
1682 return 0;
30a5de77
DK
1683
1684no_msix:
1685 /* fall to INTx if not enough memory */
1686 if (rc == -ENOMEM)
1687 bp->flags |= DISABLE_MSI_FLAG;
1688
1689 return rc;
9f6c9258
DK
1690}
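/* Editor's note (illustrative sketch, not part of the driver): the fallback
 * ladder in bnx2x_enable_msix() above, with a hypothetical request of
 * 1 (slowpath) + 1 (CNIC) + 8 (eth) = 10 vectors:
 *   rc == 10       - all vectors granted, the 8 eth queues stay as requested
 *   rc == 6        - diff = 4, num_ethernet_queues drops from 8 to 4
 *   rc == -ENOSPC  - retry with a single shared vector
 *                    (USING_SINGLE_MSIX_FLAG, one eth queue)
 *   other rc < 0   - give up on MSI-X; -ENOMEM additionally disables MSI so
 *                    the driver falls back to INTx
 */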
1691
1692static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1693{
ca92429f 1694 int i, rc, offset = 0;
9f6c9258 1695
ad5afc89
AE
1696 /* no default status block for vf */
1697 if (IS_PF(bp)) {
1698 rc = request_irq(bp->msix_table[offset++].vector,
1699 bnx2x_msix_sp_int, 0,
1700 bp->dev->name, bp->dev);
1701 if (rc) {
1702 BNX2X_ERR("request sp irq failed\n");
1703 return -EBUSY;
1704 }
9f6c9258
DK
1705 }
1706
55c11941
MS
1707 if (CNIC_SUPPORT(bp))
1708 offset++;
1709
ec6ba945 1710 for_each_eth_queue(bp, i) {
9f6c9258
DK
1711 struct bnx2x_fastpath *fp = &bp->fp[i];
1712 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1713 bp->dev->name, i);
1714
d6214d7a 1715 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1716 bnx2x_msix_fp_int, 0, fp->name, fp);
1717 if (rc) {
ca92429f
DK
1718 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1719 bp->msix_table[offset].vector, rc);
1720 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1721 return -EBUSY;
1722 }
1723
d6214d7a 1724 offset++;
9f6c9258
DK
1725 }
1726
ec6ba945 1727 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1728 if (IS_PF(bp)) {
1729 offset = 1 + CNIC_SUPPORT(bp);
1730 netdev_info(bp->dev,
1731 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1732 bp->msix_table[0].vector,
1733 0, bp->msix_table[offset].vector,
1734 i - 1, bp->msix_table[offset + i - 1].vector);
1735 } else {
1736 offset = CNIC_SUPPORT(bp);
1737 netdev_info(bp->dev,
1738 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1739 0, bp->msix_table[offset].vector,
1740 i - 1, bp->msix_table[offset + i - 1].vector);
1741 }
9f6c9258
DK
1742 return 0;
1743}
1744
d6214d7a 1745int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1746{
1747 int rc;
1748
1749 rc = pci_enable_msi(bp->pdev);
1750 if (rc) {
51c1a580 1751 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1752 return -1;
1753 }
1754 bp->flags |= USING_MSI_FLAG;
1755
1756 return 0;
1757}
1758
1759static int bnx2x_req_irq(struct bnx2x *bp)
1760{
1761 unsigned long flags;
30a5de77 1762 unsigned int irq;
9f6c9258 1763
30a5de77 1764 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1765 flags = 0;
1766 else
1767 flags = IRQF_SHARED;
1768
30a5de77
DK
1769 if (bp->flags & USING_MSIX_FLAG)
1770 irq = bp->msix_table[0].vector;
1771 else
1772 irq = bp->pdev->irq;
1773
1774 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1775}
1776
c957d09f 1777static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1778{
1779 int rc = 0;
30a5de77
DK
1780 if (bp->flags & USING_MSIX_FLAG &&
1781 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1782 rc = bnx2x_req_msix_irqs(bp);
1783 if (rc)
1784 return rc;
1785 } else {
619c5cb6
VZ
1786 rc = bnx2x_req_irq(bp);
1787 if (rc) {
1788 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1789 return rc;
1790 }
1791 if (bp->flags & USING_MSI_FLAG) {
1792 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1793 netdev_info(bp->dev, "using MSI IRQ %d\n",
1794 bp->dev->irq);
1795 }
1796 if (bp->flags & USING_MSIX_FLAG) {
1797 bp->dev->irq = bp->msix_table[0].vector;
1798 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1799 bp->dev->irq);
619c5cb6
VZ
1800 }
1801 }
1802
1803 return 0;
1804}
1805
55c11941
MS
1806static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1807{
1808 int i;
1809
8f20aa57
DK
1810 for_each_rx_queue_cnic(bp, i) {
1811 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1812 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1813 }
55c11941
MS
1814}
1815
1191cb83 1816static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1817{
1818 int i;
1819
8f20aa57
DK
1820 for_each_eth_queue(bp, i) {
1821 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1822 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1823 }
9f6c9258
DK
1824}
1825
55c11941
MS
1826static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1827{
1828 int i;
1829
8f20aa57 1830 for_each_rx_queue_cnic(bp, i) {
55c11941 1831 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1832 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1833 usleep_range(1000, 2000);
8f20aa57 1834 }
55c11941
MS
1835}
1836
1191cb83 1837static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1838{
1839 int i;
1840
8f20aa57 1841 for_each_eth_queue(bp, i) {
9f6c9258 1842 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1843 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1844 usleep_range(1000, 2000);
8f20aa57 1845 }
9f6c9258
DK
1846}
1847
1848void bnx2x_netif_start(struct bnx2x *bp)
1849{
4b7ed897
DK
1850 if (netif_running(bp->dev)) {
1851 bnx2x_napi_enable(bp);
55c11941
MS
1852 if (CNIC_LOADED(bp))
1853 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1854 bnx2x_int_enable(bp);
1855 if (bp->state == BNX2X_STATE_OPEN)
1856 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1857 }
1858}
1859
1860void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1861{
1862 bnx2x_int_disable_sync(bp, disable_hw);
1863 bnx2x_napi_disable(bp);
55c11941
MS
1864 if (CNIC_LOADED(bp))
1865 bnx2x_napi_disable_cnic(bp);
9f6c9258 1866}
9f6c9258 1867
f663dd9a 1868u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1869 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1870{
8307fa3e 1871 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1872
55c11941 1873 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1874 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1875 u16 ether_type = ntohs(hdr->h_proto);
1876
1877 /* Skip VLAN tag if present */
1878 if (ether_type == ETH_P_8021Q) {
1879 struct vlan_ethhdr *vhdr =
1880 (struct vlan_ethhdr *)skb->data;
1881
1882 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1883 }
1884
1885 /* If ethertype is FCoE or FIP - use FCoE ring */
1886 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1887 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1888 }
55c11941 1889
cdb9d6ae 1890 /* select a non-FCoE queue */
99932d4f 1891 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1892}
1893
d6214d7a
DK
1894void bnx2x_set_num_queues(struct bnx2x *bp)
1895{
96305234 1896 /* RSS queues */
55c11941 1897 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1898
a3348722
BW
1899 /* override in STORAGE SD modes */
1900 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1901 bp->num_ethernet_queues = 1;
1902
ec6ba945 1903 /* Add special queues */
55c11941
MS
1904 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1905 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1906
1907 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1908}
1909
cdb9d6ae
VZ
1910/**
1911 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1912 *
1913 * @bp: Driver handle
1914 *
1915 * We currently support at most 16 Tx queues for each CoS, thus we will
1916 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1917 * bp->max_cos.
1918 *
1919 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1920 * index after all ETH L2 indices.
1921 *
1922 * If the actual number of Tx queues (for each CoS) is less than 16, then there
1923 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1924 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1925 *
1926 * The proper configuration of skb->queue_mapping is handled by
1927 * bnx2x_select_queue() and __skb_tx_hash().
1928 *
1929 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1930 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1931 */
55c11941 1932static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1933{
6383c0b3 1934 int rc, tx, rx;
ec6ba945 1935
65565884 1936 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1937 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1938
6383c0b3 1939/* account for fcoe queue */
55c11941
MS
1940 if (include_cnic && !NO_FCOE(bp)) {
1941 rx++;
1942 tx++;
6383c0b3 1943 }
6383c0b3
AE
1944
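	/* Illustrative numbers only: with 4 ETH queues and bp->max_cos == 3
	 * the request below is for 12 Tx and 4 Rx queues, each bumped by one
	 * above when the FCoE L2 ring is included.
	 */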
1945 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1946 if (rc) {
1947 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1948 return rc;
1949 }
1950 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1951 if (rc) {
1952 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1953 return rc;
1954 }
1955
51c1a580 1956 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1957 tx, rx);
1958
ec6ba945
VZ
1959 return rc;
1960}
1961
1191cb83 1962static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1963{
1964 int i;
1965
1966 for_each_queue(bp, i) {
1967 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1968 u32 mtu;
a8c94b91
VZ
1969
1970 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1971 if (IS_FCOE_IDX(i))
1972 /*
1973 * Although there are no IP frames expected to arrive to
1974 * this ring we still want to add an
1975 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1976 * overrun attack.
1977 */
e52fcb24 1978 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1979 else
e52fcb24
ED
1980 mtu = bp->dev->mtu;
1981 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1982 IP_HEADER_ALIGNMENT_PADDING +
1983 ETH_OVREHEAD +
1984 mtu +
1985 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1986 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
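		/* Sketch of the decision below, based on this function alone:
		 * when the aligned buffer plus NET_SKB_PAD still fits in one
		 * page, a non-zero rx_frag_size lets the Rx path build buffers
		 * from page fragments; rx_frag_size = 0 is assumed to mean
		 * "don't use the frag allocator" for this ring.
		 */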
d46d132c
ED
1987 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1988 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1989 else
1990 fp->rx_frag_size = 0;
a8c94b91
VZ
1991 }
1992}
1993
60cad4e6 1994static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1995{
1996 int i;
619c5cb6
VZ
1997 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1998
16a5fd92 1999 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2000 * enabled
2001 */
5d317c6a
MS
2002 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2003 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2004 bp->fp->cl_id +
2005 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2006
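	/* ethtool_rxfh_indir_default(i, n) is simply i % n (see
	 * include/linux/ethtool.h), so the table cycles through the ETH
	 * client ids cl_id .. cl_id + num_eth_queues - 1.
	 */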
2007 /*
2008 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2009 * per-port, so if explicit configuration is needed, do it only
2010 * for a PMF.
2011 *
2012 * For 57712 and newer on the other hand it's a per-function
2013 * configuration.
2014 */
5d317c6a 2015 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2016}
2017
60cad4e6
AE
2018int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2019 bool config_hash, bool enable)
619c5cb6 2020{
3b603066 2021 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2022
2023 /* Although RSS is meaningless when there is a single HW queue we
2024 * still need it enabled in order to have HW Rx hash generated.
2025 *
2026 * if (!is_eth_multi(bp))
2027 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2028 */
2029
96305234 2030 params.rss_obj = rss_obj;
619c5cb6
VZ
2031
2032 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2033
60cad4e6
AE
2034 if (enable) {
2035 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2036
2037 /* RSS configuration */
2038 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2039 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2040 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2041 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2042 if (rss_obj->udp_rss_v4)
2043 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2044 if (rss_obj->udp_rss_v6)
2045 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2046 } else {
2047 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2048 }
619c5cb6 2049
96305234
DK
2050 /* Hash bits */
2051 params.rss_result_mask = MULTI_MASK;
619c5cb6 2052
5d317c6a 2053 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2054
96305234
DK
2055 if (config_hash) {
2056 /* RSS keys */
60cad4e6 2057 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2058 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2059 }
2060
60cad4e6
AE
2061 if (IS_PF(bp))
2062 return bnx2x_config_rss(bp, &params);
2063 else
2064 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2065}
2066
1191cb83 2067static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2068{
3b603066 2069 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2070
2071 /* Prepare parameters for function state transitions */
2072 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2073
2074 func_params.f_obj = &bp->func_obj;
2075 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2076
2077 func_params.params.hw_init.load_phase = load_code;
2078
2079 return bnx2x_func_state_change(bp, &func_params);
2080}
2081
2082/*
2083 * Cleans the objects that have internal lists without sending
16a5fd92 2084 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2085 */
7fa6f340 2086void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2087{
2088 int rc;
2089 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2090 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2091 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2092
2093 /***************** Cleanup MACs' object first *************************/
2094
2095 /* Wait for completion of the requested commands */
2096 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2097 /* Perform a dry cleanup */
2098 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2099
2100 /* Clean ETH primary MAC */
2101 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2102 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2103 &ramrod_flags);
2104 if (rc != 0)
2105 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2106
2107 /* Cleanup UC list */
2108 vlan_mac_flags = 0;
2109 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2110 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2111 &ramrod_flags);
2112 if (rc != 0)
2113 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2114
2115 /***************** Now clean mcast object *****************************/
2116 rparam.mcast_obj = &bp->mcast_obj;
2117 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2118
8b09be5f
YM
2119 /* Add a DEL command... - Since we're doing a driver cleanup only,
2120 * we take a lock surrounding both the initial send and the CONTs,
2121 * as we don't want a true completion to disrupt us in the middle.
2122 */
2123 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2124 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2125 if (rc < 0)
51c1a580
MS
2126 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2127 rc);
619c5cb6
VZ
2128
2129 /* ...and wait until all pending commands are cleared */
2130 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2131 while (rc != 0) {
2132 if (rc < 0) {
2133 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2134 rc);
8b09be5f 2135 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2136 return;
2137 }
2138
2139 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2140 }
8b09be5f 2141 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2142}
2143
2144#ifndef BNX2X_STOP_ON_ERROR
2145#define LOAD_ERROR_EXIT(bp, label) \
2146 do { \
2147 (bp)->state = BNX2X_STATE_ERROR; \
2148 goto label; \
2149 } while (0)
55c11941
MS
2150
2151#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2152 do { \
2153 bp->cnic_loaded = false; \
2154 goto label; \
2155 } while (0)
2156#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2157#define LOAD_ERROR_EXIT(bp, label) \
2158 do { \
2159 (bp)->state = BNX2X_STATE_ERROR; \
2160 (bp)->panic = 1; \
2161 return -EBUSY; \
2162 } while (0)
55c11941
MS
2163#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2164 do { \
2165 bp->cnic_loaded = false; \
2166 (bp)->panic = 1; \
2167 return -EBUSY; \
2168 } while (0)
2169#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2170
ad5afc89
AE
2171static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2172{
2173 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2174 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2175 return;
2176}
2177
2178static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2179{
8db573ba 2180 int num_groups, vf_headroom = 0;
ad5afc89 2181 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2182
ad5afc89
AE
2183 /* number of queues for statistics is number of eth queues + FCoE */
2184 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2185
ad5afc89
AE
2186 /* Total number of FW statistics requests =
2187 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2188 * and fcoe l2 queue) stats + num of queues (which includes another 1
2189 * for fcoe l2 queue if applicable)
2190 */
2191 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2192
8db573ba
AE
2193 /* vf stats appear in the request list, but their data is allocated by
2194 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2195 * it is used to determine where to place the vf stats queries in the
2196 * request struct
2197 */
2198 if (IS_SRIOV(bp))
6411280a 2199 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2200
ad5afc89
AE
2201 /* Request is built from stats_query_header and an array of
2202 * stats_query_cmd_group each of which contains
2203 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2204 * configured in the stats_query_header.
2205 */
2206 num_groups =
8db573ba
AE
2207 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2208 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2209 1 : 0));
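	/* i.e. num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
	 * STATS_QUERY_CMD_COUNT), spelled out long-hand.
	 */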
2210
8db573ba
AE
2211 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2212 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2213 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2214 num_groups * sizeof(struct stats_query_cmd_group);
2215
2216 /* Data for statistics requests + stats_counter
2217 * stats_counter holds per-STORM counters that are incremented
2218 * when STORM has finished with the current request.
2219 * memory for FCoE offloaded statistics is counted anyway,
2220 * even if they will not be sent.
2221 * VF stats are not accounted for here as the data of VF stats is stored
2222 * in memory allocated by the VF, not here.
2223 */
2224 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2225 sizeof(struct per_pf_stats) +
2226 sizeof(struct fcoe_statistics_params) +
2227 sizeof(struct per_queue_stats) * num_queue_stats +
2228 sizeof(struct stats_counter);
2229
cd2b0389
JP
2230 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2231 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2232 if (!bp->fw_stats)
2233 goto alloc_mem_err;
ad5afc89
AE
2234
2235 /* Set shortcuts */
2236 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2237 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2238 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2239 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2240 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2241 bp->fw_stats_req_sz;
2242
6bf07b8e 2243 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2244 U64_HI(bp->fw_stats_req_mapping),
2245 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2246 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2247 U64_HI(bp->fw_stats_data_mapping),
2248 U64_LO(bp->fw_stats_data_mapping));
2249 return 0;
2250
2251alloc_mem_err:
2252 bnx2x_free_fw_stats_mem(bp);
2253 BNX2X_ERR("Can't allocate FW stats memory\n");
2254 return -ENOMEM;
2255}
2256
2257/* send load request to mcp and analyze response */
2258static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2259{
178135c1
DK
2260 u32 param;
2261
ad5afc89
AE
2262 /* init fw_seq */
2263 bp->fw_seq =
2264 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2265 DRV_MSG_SEQ_NUMBER_MASK);
2266 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2267
2268 /* Get current FW pulse sequence */
2269 bp->fw_drv_pulse_wr_seq =
2270 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2271 DRV_PULSE_SEQ_MASK);
2272 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2273
178135c1
DK
2274 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2275
2276 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2277 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2278
ad5afc89 2279 /* load request */
178135c1 2280 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2281
2282 /* if mcp fails to respond we must abort */
2283 if (!(*load_code)) {
2284 BNX2X_ERR("MCP response failure, aborting\n");
2285 return -EBUSY;
2286 }
2287
2288 /* If mcp refused (e.g. other port is in diagnostic mode) we
2289 * must abort
2290 */
2291 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2292 BNX2X_ERR("MCP refused load request, aborting\n");
2293 return -EBUSY;
2294 }
2295 return 0;
2296}
2297
2298/* check whether another PF has already loaded FW to chip. In
2299 * virtualized environments a pf from another VM may have already
2300 * initialized the device including loading FW
2301 */
91ebb929 2302int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2303{
2304 /* is another pf loaded on this engine? */
2305 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2306 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2307 /* build my FW version dword */
2308 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2309 (BCM_5710_FW_MINOR_VERSION << 8) +
2310 (BCM_5710_FW_REVISION_VERSION << 16) +
2311 (BCM_5710_FW_ENGINEERING_VERSION << 24);
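		/* my_fw packs major, minor, revision and engineering version
		 * into bytes 0..3 of a single dword; the value read back from
		 * XSEM_REG_PRAM below is presumably laid out the same way,
		 * since the two are compared directly.
		 */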
2312
2313 /* read loaded FW from chip */
2314 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2315
2316 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2317 loaded_fw, my_fw);
2318
2319 /* abort nic load if version mismatch */
2320 if (my_fw != loaded_fw) {
91ebb929
YM
2321 if (print_err)
2322 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2323 loaded_fw, my_fw);
2324 else
2325 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2326 loaded_fw, my_fw);
ad5afc89
AE
2327 return -EBUSY;
2328 }
2329 }
2330 return 0;
2331}
2332
2333/* returns the "mcp load_code" according to global load_count array */
2334static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2335{
2336 int path = BP_PATH(bp);
2337
2338 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2339 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2340 bnx2x_load_count[path][2]);
2341 bnx2x_load_count[path][0]++;
2342 bnx2x_load_count[path][1 + port]++;
ad5afc89 2343 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2344 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2345 bnx2x_load_count[path][2]);
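	/* bnx2x_load_count[path][0] counts every loaded function on this path,
	 * while [1] and [2] count the functions on port 0 and port 1; the
	 * first function on the path gets COMMON, the first on its port gets
	 * PORT and everyone else gets FUNCTION.
	 */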
2346 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2347 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2348 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2349 return FW_MSG_CODE_DRV_LOAD_PORT;
2350 else
2351 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2352}
2353
2354/* mark PMF if applicable */
2355static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2356{
2357 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2358 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2359 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2360 bp->port.pmf = 1;
2361 /* We need the barrier to ensure the ordering between the
2362 * writing to bp->port.pmf here and reading it from the
2363 * bnx2x_periodic_task().
2364 */
2365 smp_mb();
2366 } else {
2367 bp->port.pmf = 0;
452427b0
YM
2368 }
2369
ad5afc89
AE
2370 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2371}
2372
2373static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2374{
2375 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2376 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2377 (bp->common.shmem2_base)) {
2378 if (SHMEM2_HAS(bp, dcc_support))
2379 SHMEM2_WR(bp, dcc_support,
2380 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2381 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2382 if (SHMEM2_HAS(bp, afex_driver_support))
2383 SHMEM2_WR(bp, afex_driver_support,
2384 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2385 }
2386
2387 /* Set AFEX default VLAN tag to an invalid value */
2388 bp->afex_def_vlan_tag = -1;
452427b0
YM
2389}
2390
1191cb83
ED
2391/**
2392 * bnx2x_bz_fp - zero content of the fastpath structure.
2393 *
2394 * @bp: driver handle
2395 * @index: fastpath index to be zeroed
2396 *
2397 * Makes sure the contents of the bp->fp[index].napi is kept
2398 * intact.
2399 */
2400static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2401{
2402 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2403 int cos;
1191cb83 2404 struct napi_struct orig_napi = fp->napi;
15192a8c 2405 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2406
1191cb83 2407 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2408 if (fp->tpa_info)
2409 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2410 sizeof(struct bnx2x_agg_info));
2411 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2412
2413 /* Restore the NAPI object as it has been already initialized */
2414 fp->napi = orig_napi;
15192a8c 2415 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2416 fp->bp = bp;
2417 fp->index = index;
2418 if (IS_ETH_FP(fp))
2419 fp->max_cos = bp->max_cos;
2420 else
2421 /* Special queues support only one CoS */
2422 fp->max_cos = 1;
2423
65565884 2424 /* Init txdata pointers */
65565884
MS
2425 if (IS_FCOE_FP(fp))
2426 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2427 if (IS_ETH_FP(fp))
2428 for_each_cos_in_tx_queue(fp, cos)
2429 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2430 BNX2X_NUM_ETH_QUEUES(bp) + index];
2431
16a5fd92 2432 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2433 * minimal size so it must be set prior to queue memory allocation
2434 */
2435 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2436 (bp->flags & GRO_ENABLE_FLAG &&
2437 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2438 if (bp->flags & TPA_ENABLE_FLAG)
2439 fp->mode = TPA_MODE_LRO;
2440 else if (bp->flags & GRO_ENABLE_FLAG)
2441 fp->mode = TPA_MODE_GRO;
2442
1191cb83
ED
2443 /* We don't want TPA on an FCoE L2 ring */
2444 if (IS_FCOE_FP(fp))
2445 fp->disable_tpa = 1;
55c11941
MS
2446}
2447
2448int bnx2x_load_cnic(struct bnx2x *bp)
2449{
2450 int i, rc, port = BP_PORT(bp);
2451
2452 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2453
2454 mutex_init(&bp->cnic_mutex);
2455
ad5afc89
AE
2456 if (IS_PF(bp)) {
2457 rc = bnx2x_alloc_mem_cnic(bp);
2458 if (rc) {
2459 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2460 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2461 }
55c11941
MS
2462 }
2463
2464 rc = bnx2x_alloc_fp_mem_cnic(bp);
2465 if (rc) {
2466 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2467 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2468 }
2469
2470 /* Update the number of queues with the cnic queues */
2471 rc = bnx2x_set_real_num_queues(bp, 1);
2472 if (rc) {
2473 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2474 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2475 }
2476
2477 /* Add all CNIC NAPI objects */
2478 bnx2x_add_all_napi_cnic(bp);
2479 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2480 bnx2x_napi_enable_cnic(bp);
2481
2482 rc = bnx2x_init_hw_func_cnic(bp);
2483 if (rc)
2484 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2485
2486 bnx2x_nic_init_cnic(bp);
2487
ad5afc89
AE
2488 if (IS_PF(bp)) {
2489 /* Enable Timer scan */
2490 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2491
2492 /* setup cnic queues */
2493 for_each_cnic_queue(bp, i) {
2494 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2495 if (rc) {
2496 BNX2X_ERR("Queue setup failed\n");
2497 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2498 }
55c11941
MS
2499 }
2500 }
2501
2502 /* Initialize Rx filter. */
8b09be5f 2503 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2504
2505 /* re-read iscsi info */
2506 bnx2x_get_iscsi_info(bp);
2507 bnx2x_setup_cnic_irq_info(bp);
2508 bnx2x_setup_cnic_info(bp);
2509 bp->cnic_loaded = true;
2510 if (bp->state == BNX2X_STATE_OPEN)
2511 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2512
55c11941
MS
2513 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2514
2515 return 0;
2516
2517#ifndef BNX2X_STOP_ON_ERROR
2518load_error_cnic2:
2519 /* Disable Timer scan */
2520 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2521
2522load_error_cnic1:
2523 bnx2x_napi_disable_cnic(bp);
2524 /* Update the number of queues without the cnic queues */
d9d81862 2525 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2526 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2527load_error_cnic0:
2528 BNX2X_ERR("CNIC-related load failed\n");
2529 bnx2x_free_fp_mem_cnic(bp);
2530 bnx2x_free_mem_cnic(bp);
2531 return rc;
2532#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2533}
2534
9f6c9258
DK
2535/* must be called with rtnl_lock */
2536int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2537{
619c5cb6 2538 int port = BP_PORT(bp);
ad5afc89 2539 int i, rc = 0, load_code = 0;
9f6c9258 2540
55c11941
MS
2541 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2542 DP(NETIF_MSG_IFUP,
2543 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2544
9f6c9258 2545#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2546 if (unlikely(bp->panic)) {
2547 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2548 return -EPERM;
51c1a580 2549 }
9f6c9258
DK
2550#endif
2551
2552 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2553
16a5fd92 2554 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2555 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2556 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2557 &bp->last_reported_link.link_report_flags);
2ae17f66 2558
ad5afc89
AE
2559 if (IS_PF(bp))
2560 /* must be called before memory allocation and HW init */
2561 bnx2x_ilt_set_info(bp);
523224a3 2562
6383c0b3
AE
2563 /*
2564 * Zero fastpath structures preserving invariants like napi, which are
2565 * allocated only once, fp index, max_cos, bp pointer.
65565884 2566 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2567 */
51c1a580 2568 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2569 for_each_queue(bp, i)
2570 bnx2x_bz_fp(bp, i);
55c11941
MS
2571 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2572 bp->num_cnic_queues) *
2573 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2574
55c11941 2575 bp->fcoe_init = false;
6383c0b3 2576
a8c94b91
VZ
2577 /* Set the receive queues buffer size */
2578 bnx2x_set_rx_buf_size(bp);
2579
ad5afc89
AE
2580 if (IS_PF(bp)) {
2581 rc = bnx2x_alloc_mem(bp);
2582 if (rc) {
2583 BNX2X_ERR("Unable to allocate bp memory\n");
2584 return rc;
2585 }
2586 }
2587
ad5afc89
AE
2588 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2589 * of memory available for RSS queues
2590 */
2591 rc = bnx2x_alloc_fp_mem(bp);
2592 if (rc) {
2593 BNX2X_ERR("Unable to allocate memory for fps\n");
2594 LOAD_ERROR_EXIT(bp, load_error0);
2595 }
d6214d7a 2596
e3ed4eae
DK
2597 /* Allocated memory for FW statistics */
2598 if (bnx2x_alloc_fw_stats_mem(bp))
2599 LOAD_ERROR_EXIT(bp, load_error0);
2600
8d9ac297
AE
2601 /* request pf to initialize status blocks */
2602 if (IS_VF(bp)) {
2603 rc = bnx2x_vfpf_init(bp);
2604 if (rc)
2605 LOAD_ERROR_EXIT(bp, load_error0);
2606 }
2607
b3b83c3f
DK
2608 /* Since bnx2x_alloc_mem() may update
2609 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2610 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2611 */
55c11941 2612 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2613 if (rc) {
ec6ba945 2614 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2615 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2616 }
2617
6383c0b3 2618 /* configure multi cos mappings in kernel.
16a5fd92
YM
2619 * this configuration may be overridden by a multi class queue
2620 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2621 */
2622 bnx2x_setup_tc(bp->dev, bp->max_cos);
2623
26614ba5
MS
2624 /* Add all NAPI objects */
2625 bnx2x_add_all_napi(bp);
55c11941 2626 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2627 bnx2x_napi_enable(bp);
2628
ad5afc89
AE
2629 if (IS_PF(bp)) {
2630 /* set pf load just before approaching the MCP */
2631 bnx2x_set_pf_load(bp);
2632
2633 /* if mcp exists send load request and analyze response */
2634 if (!BP_NOMCP(bp)) {
2635 /* attempt to load pf */
2636 rc = bnx2x_nic_load_request(bp, &load_code);
2637 if (rc)
2638 LOAD_ERROR_EXIT(bp, load_error1);
2639
2640 /* what did mcp say? */
91ebb929 2641 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2642 if (rc) {
2643 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2644 LOAD_ERROR_EXIT(bp, load_error2);
2645 }
ad5afc89
AE
2646 } else {
2647 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2648 }
9f6c9258 2649
ad5afc89
AE
2650 /* mark pmf if applicable */
2651 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2652
ad5afc89
AE
2653 /* Init Function state controlling object */
2654 bnx2x__init_func_obj(bp);
6383c0b3 2655
ad5afc89
AE
2656 /* Initialize HW */
2657 rc = bnx2x_init_hw(bp, load_code);
2658 if (rc) {
2659 BNX2X_ERR("HW init failed, aborting\n");
2660 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2661 LOAD_ERROR_EXIT(bp, load_error2);
2662 }
9f6c9258
DK
2663 }
2664
ecf01c22
YM
2665 bnx2x_pre_irq_nic_init(bp);
2666
d6214d7a
DK
2667 /* Connect to IRQs */
2668 rc = bnx2x_setup_irqs(bp);
523224a3 2669 if (rc) {
ad5afc89
AE
2670 BNX2X_ERR("setup irqs failed\n");
2671 if (IS_PF(bp))
2672 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2673 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2674 }
2675
619c5cb6 2676 /* Init per-function objects */
ad5afc89 2677 if (IS_PF(bp)) {
ecf01c22
YM
2678 /* Setup NIC internals and enable interrupts */
2679 bnx2x_post_irq_nic_init(bp, load_code);
2680
ad5afc89 2681 bnx2x_init_bp_objs(bp);
b56e9670 2682 bnx2x_iov_nic_init(bp);
a3348722 2683
ad5afc89
AE
2684 /* Set AFEX default VLAN tag to an invalid value */
2685 bp->afex_def_vlan_tag = -1;
2686 bnx2x_nic_load_afex_dcc(bp, load_code);
2687 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2688 rc = bnx2x_func_start(bp);
2689 if (rc) {
2690 BNX2X_ERR("Function start failed!\n");
2691 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2692
619c5cb6 2693 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2694 }
9f6c9258 2695
ad5afc89
AE
2696 /* Send LOAD_DONE command to MCP */
2697 if (!BP_NOMCP(bp)) {
2698 load_code = bnx2x_fw_command(bp,
2699 DRV_MSG_CODE_LOAD_DONE, 0);
2700 if (!load_code) {
2701 BNX2X_ERR("MCP response failure, aborting\n");
2702 rc = -EBUSY;
2703 LOAD_ERROR_EXIT(bp, load_error3);
2704 }
2705 }
9f6c9258 2706
0c14e5ce
AE
2707 /* initialize FW coalescing state machines in RAM */
2708 bnx2x_update_coalesce(bp);
60cad4e6 2709 }
0c14e5ce 2710
60cad4e6
AE
2711 /* setup the leading queue */
2712 rc = bnx2x_setup_leading(bp);
2713 if (rc) {
2714 BNX2X_ERR("Setup leading failed!\n");
2715 LOAD_ERROR_EXIT(bp, load_error3);
2716 }
ad5afc89 2717
60cad4e6
AE
2718 /* set up the rest of the queues */
2719 for_each_nondefault_eth_queue(bp, i) {
2720 if (IS_PF(bp))
2721 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2722 else /* VF */
2723 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2724 if (rc) {
60cad4e6 2725 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2726 LOAD_ERROR_EXIT(bp, load_error3);
2727 }
60cad4e6 2728 }
8d9ac297 2729
60cad4e6
AE
2730 /* setup rss */
2731 rc = bnx2x_init_rss(bp);
2732 if (rc) {
2733 BNX2X_ERR("PF RSS init failed\n");
2734 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2735 }
619c5cb6 2736
523224a3
DK
2737 /* Now when Clients are configured we are ready to work */
2738 bp->state = BNX2X_STATE_OPEN;
2739
619c5cb6 2740 /* Configure a ucast MAC */
ad5afc89
AE
2741 if (IS_PF(bp))
2742 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2743 else /* vf */
f8f4f61a
DK
2744 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2745 true);
51c1a580
MS
2746 if (rc) {
2747 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2748 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2749 }
6e30dd4e 2750
ad5afc89 2751 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2752 bnx2x_update_max_mf_config(bp, bp->pending_max);
2753 bp->pending_max = 0;
2754 }
2755
ad5afc89
AE
2756 if (bp->port.pmf) {
2757 rc = bnx2x_initial_phy_init(bp, load_mode);
2758 if (rc)
2759 LOAD_ERROR_EXIT(bp, load_error3);
2760 }
c63da990 2761 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2762
619c5cb6
VZ
2763 /* Start fast path */
2764
2765 /* Initialize Rx filter. */
8b09be5f 2766 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2767
619c5cb6 2768 /* Start the Tx */
9f6c9258
DK
2769 switch (load_mode) {
2770 case LOAD_NORMAL:
16a5fd92 2771 /* Tx queues should only be re-enabled */
523224a3 2772 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2773 break;
2774
2775 case LOAD_OPEN:
2776 netif_tx_start_all_queues(bp->dev);
523224a3 2777 smp_mb__after_clear_bit();
9f6c9258
DK
2778 break;
2779
2780 case LOAD_DIAG:
8970b2e4 2781 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2782 bp->state = BNX2X_STATE_DIAG;
2783 break;
2784
2785 default:
2786 break;
2787 }
2788
00253a8c 2789 if (bp->port.pmf)
4c704899 2790 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2791 else
9f6c9258
DK
2792 bnx2x__link_status_update(bp);
2793
2794 /* start the timer */
2795 mod_timer(&bp->timer, jiffies + bp->current_interval);
2796
55c11941
MS
2797 if (CNIC_ENABLED(bp))
2798 bnx2x_load_cnic(bp);
9f6c9258 2799
ad5afc89
AE
2800 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2801 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2802 u32 val;
2803 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2804 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2805 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2806 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2807 }
2808
619c5cb6 2809 /* Wait for all pending SP commands to complete */
ad5afc89 2810 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2811 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2812 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2813 return -EBUSY;
2814 }
6891dd25 2815
9876879f
BW
2816 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2817 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2818 bnx2x_dcbx_init(bp, false);
2819
55c11941
MS
2820 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2821
9f6c9258
DK
2822 return 0;
2823
619c5cb6 2824#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2825load_error3:
ad5afc89
AE
2826 if (IS_PF(bp)) {
2827 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2828
ad5afc89
AE
2829 /* Clean queueable objects */
2830 bnx2x_squeeze_objects(bp);
2831 }
619c5cb6 2832
9f6c9258
DK
2833 /* Free SKBs, SGEs, TPA pool and driver internals */
2834 bnx2x_free_skbs(bp);
ec6ba945 2835 for_each_rx_queue(bp, i)
9f6c9258 2836 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2837
9f6c9258 2838 /* Release IRQs */
d6214d7a
DK
2839 bnx2x_free_irq(bp);
2840load_error2:
ad5afc89 2841 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2842 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2843 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2844 }
2845
2846 bp->port.pmf = 0;
9f6c9258
DK
2847load_error1:
2848 bnx2x_napi_disable(bp);
722c6f58 2849 bnx2x_del_all_napi(bp);
ad5afc89 2850
889b9af3 2851 /* clear pf_load status, as it was already set */
ad5afc89
AE
2852 if (IS_PF(bp))
2853 bnx2x_clear_pf_load(bp);
d6214d7a 2854load_error0:
ad5afc89 2855 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2856 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2857 bnx2x_free_mem(bp);
2858
2859 return rc;
619c5cb6 2860#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2861}
2862
7fa6f340 2863int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2864{
2865 u8 rc = 0, cos, i;
2866
2867 /* Wait until tx fastpath tasks complete */
2868 for_each_tx_queue(bp, i) {
2869 struct bnx2x_fastpath *fp = &bp->fp[i];
2870
2871 for_each_cos_in_tx_queue(fp, cos)
2872 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2873 if (rc)
2874 return rc;
2875 }
2876 return 0;
2877}
2878
9f6c9258 2879/* must be called with rtnl_lock */
5d07d868 2880int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2881{
2882 int i;
c9ee9206
VZ
2883 bool global = false;
2884
55c11941
MS
2885 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2886
9ce392d4 2887 /* mark driver is unloaded in shmem2 */
ad5afc89 2888 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2889 u32 val;
2890 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2891 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2892 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2893 }
2894
80bfe5cc 2895 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2896 (bp->state == BNX2X_STATE_CLOSED ||
2897 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2898 /* We can get here if the driver has been unloaded
2899 * during parity error recovery and is either waiting for a
2900 * leader to complete or for other functions to unload and
2901 * then ifdown has been issued. In this case we want to
2902 * unload and let other functions to complete a recovery
2903 * process.
2904 */
9f6c9258
DK
2905 bp->recovery_state = BNX2X_RECOVERY_DONE;
2906 bp->is_leader = 0;
c9ee9206
VZ
2907 bnx2x_release_leader_lock(bp);
2908 smp_mb();
2909
51c1a580
MS
2910 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2911 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2912 return -EINVAL;
2913 }
2914
80bfe5cc 2915 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2916 * has not completed successfully - all resources are released.
80bfe5cc
YM
2917 *
2918 * we can get here only after unsuccessful ndo_* callback, during which
2919 * dev->IFF_UP flag is still on.
2920 */
2921 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2922 return 0;
2923
2924 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2925 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2926 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2927 */
2928 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2929 smp_mb();
2930
78c3bcc5
AE
2931 /* indicate to VFs that the PF is going down */
2932 bnx2x_iov_channel_down(bp);
2933
55c11941
MS
2934 if (CNIC_LOADED(bp))
2935 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2936
9505ee37
VZ
2937 /* Stop Tx */
2938 bnx2x_tx_disable(bp);
65565884 2939 netdev_reset_tc(bp->dev);
9505ee37 2940
9f6c9258 2941 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2942
9f6c9258 2943 del_timer_sync(&bp->timer);
f85582f8 2944
ad5afc89
AE
2945 if (IS_PF(bp)) {
2946 /* Set ALWAYS_ALIVE bit in shmem */
2947 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2948 bnx2x_drv_pulse(bp);
2949 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2950 bnx2x_save_statistics(bp);
2951 }
9f6c9258 2952
ad5afc89
AE
2953 /* wait till consumers catch up with producers in all queues */
2954 bnx2x_drain_tx_queues(bp);
9f6c9258 2955
9b176b6b
AE
2956 /* if VF, indicate to PF that this function is going down (the PF will
2957 * delete sp elements and clear initializations)
2958 */
2959 if (IS_VF(bp))
2960 bnx2x_vfpf_close_vf(bp);
2961 else if (unload_mode != UNLOAD_RECOVERY)
2962 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2963 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2964 else {
c9ee9206
VZ
2965 /* Send the UNLOAD_REQUEST to the MCP */
2966 bnx2x_send_unload_req(bp, unload_mode);
2967
16a5fd92 2968 /* Prevent transactions to host from the functions on the
c9ee9206 2969 * engine that doesn't reset global blocks in case of global
16a5fd92 2970 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2971 * (the engine whose leader will perform the recovery
2972 * last).
2973 */
2974 if (!CHIP_IS_E1x(bp))
2975 bnx2x_pf_disable(bp);
2976
2977 /* Disable HW interrupts, NAPI */
523224a3 2978 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2979 /* Delete all NAPI objects */
2980 bnx2x_del_all_napi(bp);
55c11941
MS
2981 if (CNIC_LOADED(bp))
2982 bnx2x_del_all_napi_cnic(bp);
523224a3 2983 /* Release IRQs */
d6214d7a 2984 bnx2x_free_irq(bp);
c9ee9206
VZ
2985
2986 /* Report UNLOAD_DONE to MCP */
5d07d868 2987 bnx2x_send_unload_done(bp, false);
523224a3 2988 }
9f6c9258 2989
619c5cb6 2990 /*
16a5fd92 2991 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2992 * the queueable objects here in case they failed to get cleaned so far.
2993 */
ad5afc89
AE
2994 if (IS_PF(bp))
2995 bnx2x_squeeze_objects(bp);
619c5cb6 2996
79616895
VZ
2997 /* There should be no more pending SP commands at this stage */
2998 bp->sp_state = 0;
2999
9f6c9258
DK
3000 bp->port.pmf = 0;
3001
a0d307b2
DK
3002 /* clear pending work in rtnl task */
3003 bp->sp_rtnl_state = 0;
3004 smp_mb();
3005
9f6c9258
DK
3006 /* Free SKBs, SGEs, TPA pool and driver internals */
3007 bnx2x_free_skbs(bp);
55c11941
MS
3008 if (CNIC_LOADED(bp))
3009 bnx2x_free_skbs_cnic(bp);
ec6ba945 3010 for_each_rx_queue(bp, i)
9f6c9258 3011 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3012
ad5afc89
AE
3013 bnx2x_free_fp_mem(bp);
3014 if (CNIC_LOADED(bp))
55c11941 3015 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3016
ad5afc89 3017 if (IS_PF(bp)) {
ad5afc89
AE
3018 if (CNIC_LOADED(bp))
3019 bnx2x_free_mem_cnic(bp);
3020 }
b4cddbd6
AE
3021 bnx2x_free_mem(bp);
3022
9f6c9258 3023 bp->state = BNX2X_STATE_CLOSED;
55c11941 3024 bp->cnic_loaded = false;
9f6c9258 3025
c9ee9206
VZ
3026 /* Check if there are pending parity attentions. If there are - set
3027 * RECOVERY_IN_PROGRESS.
3028 */
ad5afc89 3029 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3030 bnx2x_set_reset_in_progress(bp);
3031
3032 /* Set RESET_IS_GLOBAL if needed */
3033 if (global)
3034 bnx2x_set_reset_global(bp);
3035 }
3036
9f6c9258
DK
3037 /* The last driver must disable a "close the gate" if there is no
3038 * parity attention or "process kill" pending.
3039 */
ad5afc89
AE
3040 if (IS_PF(bp) &&
3041 !bnx2x_clear_pf_load(bp) &&
3042 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3043 bnx2x_disable_close_the_gate(bp);
3044
55c11941
MS
3045 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3046
9f6c9258
DK
3047 return 0;
3048}
f85582f8 3049
9f6c9258
DK
3050int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3051{
3052 u16 pmcsr;
3053
adf5f6a1 3054 /* If there is no power capability, silently succeed */
29ed74c3 3055 if (!bp->pdev->pm_cap) {
51c1a580 3056 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3057 return 0;
3058 }
3059
29ed74c3 3060 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3061
3062 switch (state) {
3063 case PCI_D0:
29ed74c3 3064 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3065 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3066 PCI_PM_CTRL_PME_STATUS));
3067
3068 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3069 /* delay required during transition out of D3hot */
3070 msleep(20);
3071 break;
3072
3073 case PCI_D3hot:
3074 /* If there are other clients above, don't
3075 shut down the power */
3076 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3077 return 0;
3078 /* Don't shut down the power for emulation and FPGA */
3079 if (CHIP_REV_IS_SLOW(bp))
3080 return 0;
3081
3082 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3083 pmcsr |= 3;
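		/* 3 is the D3hot encoding of the PCI_PM_CTRL_STATE_MASK field */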
3084
3085 if (bp->wol)
3086 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3087
29ed74c3 3088 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3089 pmcsr);
3090
3091 /* No more memory access after this point until
3092 * device is brought back to D0.
3093 */
3094 break;
3095
3096 default:
51c1a580 3097 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3098 return -EINVAL;
3099 }
3100 return 0;
3101}
3102
9f6c9258
DK
3103/*
3104 * net_device service functions
3105 */
a8f47eb7 3106static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3107{
3108 int work_done = 0;
6383c0b3 3109 u8 cos;
9f6c9258
DK
3110 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3111 napi);
3112 struct bnx2x *bp = fp->bp;
3113
3114 while (1) {
3115#ifdef BNX2X_STOP_ON_ERROR
3116 if (unlikely(bp->panic)) {
3117 napi_complete(napi);
3118 return 0;
3119 }
3120#endif
8f20aa57
DK
3121 if (!bnx2x_fp_lock_napi(fp))
3122 return work_done;
9f6c9258 3123
6383c0b3 3124 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3125 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3126 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3127
9f6c9258
DK
3128 if (bnx2x_has_rx_work(fp)) {
3129 work_done += bnx2x_rx_int(fp, budget - work_done);
3130
3131 /* must not complete if we consumed full budget */
8f20aa57
DK
3132 if (work_done >= budget) {
3133 bnx2x_fp_unlock_napi(fp);
9f6c9258 3134 break;
8f20aa57 3135 }
9f6c9258
DK
3136 }
3137
3138 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3139 if (!bnx2x_fp_unlock_napi(fp) &&
3140 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3141
ec6ba945
VZ
3142 /* No need to update SB for FCoE L2 ring as long as
3143 * it's connected to the default SB and the SB
3144 * has been updated when NAPI was scheduled.
3145 */
3146 if (IS_FCOE_FP(fp)) {
3147 napi_complete(napi);
3148 break;
3149 }
9f6c9258 3150 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3151 /* bnx2x_has_rx_work() reads the status block,
3152 * thus we need to ensure that status block indices
3153 * have been actually read (bnx2x_update_fpsb_idx)
3154 * prior to this check (bnx2x_has_rx_work) so that
3155 * we won't write the "newer" value of the status block
3156 * to IGU (if there was a DMA right after
3157 * bnx2x_has_rx_work and if there is no rmb, the memory
3158 * reading (bnx2x_update_fpsb_idx) may be postponed
3159 * to right before bnx2x_ack_sb). In this case there
3160 * will never be another interrupt until there is
3161 * another update of the status block, while there
3162 * is still unhandled work.
3163 */
9f6c9258
DK
3164 rmb();
3165
3166 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3167 napi_complete(napi);
3168 /* Re-enable interrupts */
51c1a580 3169 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3170 "Update index to %d\n", fp->fp_hc_idx);
3171 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3172 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3173 IGU_INT_ENABLE, 1);
3174 break;
3175 }
3176 }
3177 }
3178
3179 return work_done;
3180}
3181
e0d1095a 3182#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3183/* must be called with local_bh_disable()d */
3184int bnx2x_low_latency_recv(struct napi_struct *napi)
3185{
3186 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3187 napi);
3188 struct bnx2x *bp = fp->bp;
3189 int found = 0;
3190
3191 if ((bp->state == BNX2X_STATE_CLOSED) ||
3192 (bp->state == BNX2X_STATE_ERROR) ||
3193 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3194 return LL_FLUSH_FAILED;
3195
3196 if (!bnx2x_fp_lock_poll(fp))
3197 return LL_FLUSH_BUSY;
3198
75b29459 3199 if (bnx2x_has_rx_work(fp))
8f20aa57 3200 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3201
3202 bnx2x_fp_unlock_poll(fp);
3203
3204 return found;
3205}
3206#endif
3207
9f6c9258
DK
3208/* we split the first BD into headers and data BDs
3209 * to ease the pain of our fellow microcode engineers;
3210 * we use one mapping for both BDs
9f6c9258 3211 */
91226790
DK
3212static u16 bnx2x_tx_split(struct bnx2x *bp,
3213 struct bnx2x_fp_txdata *txdata,
3214 struct sw_tx_bd *tx_buf,
3215 struct eth_tx_start_bd **tx_bd, u16 hlen,
3216 u16 bd_prod)
9f6c9258
DK
3217{
3218 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3219 struct eth_tx_bd *d_tx_bd;
3220 dma_addr_t mapping;
3221 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3222
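	/* After the split the start BD keeps only the hlen header bytes, while
	 * a new data BD covers the remaining old_len - hlen bytes at the same
	 * DMA mapping offset by hlen - hence "one mapping for both BDs".
	 */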
3223 /* first fix first BD */
9f6c9258
DK
3224 h_tx_bd->nbytes = cpu_to_le16(hlen);
3225
91226790
DK
3226 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3227 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3228
3229 /* now get a new data BD
3230 * (after the pbd) and fill it */
3231 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3232 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3233
3234 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3235 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3236
3237 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3238 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3239 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3240
3241 /* this marks the BD as one that has no individual mapping */
3242 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3243
3244 DP(NETIF_MSG_TX_QUEUED,
3245 "TSO split data size is %d (%x:%x)\n",
3246 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3247
3248 /* update tx_bd */
3249 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3250
3251 return bd_prod;
3252}
3253
86564c3f
YM
3254#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3255#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3256static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3257{
86564c3f
YM
3258 __sum16 tsum = (__force __sum16) csum;
3259
9f6c9258 3260 if (fix > 0)
86564c3f
YM
3261 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3262 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3263
3264 else if (fix < 0)
86564c3f
YM
3265 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3266 csum_partial(t_header, -fix, 0)));
9f6c9258 3267
e2593fcd 3268 return bswab16(tsum);
9f6c9258
DK
3269}
3270
91226790 3271static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3272{
3273 u32 rc;
a848ade4
DK
3274 __u8 prot = 0;
3275 __be16 protocol;
9f6c9258
DK
3276
3277 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3278 return XMIT_PLAIN;
9f6c9258 3279
a848ade4
DK
3280 protocol = vlan_get_protocol(skb);
3281 if (protocol == htons(ETH_P_IPV6)) {
3282 rc = XMIT_CSUM_V6;
3283 prot = ipv6_hdr(skb)->nexthdr;
3284 } else {
3285 rc = XMIT_CSUM_V4;
3286 prot = ip_hdr(skb)->protocol;
3287 }
9f6c9258 3288
a848ade4
DK
3289 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3290 if (inner_ip_hdr(skb)->version == 6) {
3291 rc |= XMIT_CSUM_ENC_V6;
3292 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3293 rc |= XMIT_CSUM_TCP;
9f6c9258 3294 } else {
a848ade4
DK
3295 rc |= XMIT_CSUM_ENC_V4;
3296 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3297 rc |= XMIT_CSUM_TCP;
3298 }
3299 }
a848ade4
DK
3300 if (prot == IPPROTO_TCP)
3301 rc |= XMIT_CSUM_TCP;
9f6c9258 3302
36a8f39e
ED
3303 if (skb_is_gso(skb)) {
3304 if (skb_is_gso_v6(skb)) {
3305 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3306 if (rc & XMIT_CSUM_ENC)
3307 rc |= XMIT_GSO_ENC_V6;
3308 } else {
3309 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3310 if (rc & XMIT_CSUM_ENC)
3311 rc |= XMIT_GSO_ENC_V4;
3312 }
a848ade4 3313 }
9f6c9258
DK
3314
3315 return rc;
3316}
3317
3318#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3319/* check if packet requires linearization (packet is too fragmented);
3320 no need to check fragmentation if page size > 8K (there will be no
3321 violation of FW restrictions) */
3322static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3323 u32 xmit_type)
3324{
3325 int to_copy = 0;
3326 int hlen = 0;
3327 int first_bd_sz = 0;
3328
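	/* Sketch of the window check below: 3 BDs are reserved (headers BD
	 * plus PBD and last BD, per the comments in this function), so every
	 * run of wnd_size = MAX_FETCH_BD - 3 consecutive fragments must carry
	 * at least one full gso_size worth of data; a window that falls short
	 * forces linearization (to_copy = 1).
	 */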
3329 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3330 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3331
3332 if (xmit_type & XMIT_GSO) {
3333 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3334 /* Check if LSO packet needs to be copied:
3335 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3336 int wnd_size = MAX_FETCH_BD - 3;
3337 /* Number of windows to check */
3338 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3339 int wnd_idx = 0;
3340 int frag_idx = 0;
3341 u32 wnd_sum = 0;
3342
3343 /* Headers length */
3344 hlen = (int)(skb_transport_header(skb) - skb->data) +
3345 tcp_hdrlen(skb);
3346
3347 /* Amount of data (w/o headers) on linear part of SKB*/
3348 first_bd_sz = skb_headlen(skb) - hlen;
3349
3350 wnd_sum = first_bd_sz;
3351
3352 /* Calculate the first sum - it's special */
3353 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3354 wnd_sum +=
9e903e08 3355 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3356
3357 /* If there was data in the linear part of the skb - check it */
3358 if (first_bd_sz > 0) {
3359 if (unlikely(wnd_sum < lso_mss)) {
3360 to_copy = 1;
3361 goto exit_lbl;
3362 }
3363
3364 wnd_sum -= first_bd_sz;
3365 }
3366
3367 /* Others are easier: run through the frag list and
3368 check all windows */
3369 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3370 wnd_sum +=
9e903e08 3371 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3372
3373 if (unlikely(wnd_sum < lso_mss)) {
3374 to_copy = 1;
3375 break;
3376 }
3377 wnd_sum -=
9e903e08 3378 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3379 }
3380 } else {
3381 /* a non-LSO packet that is too fragmented should always
3382 be linearized */
3383 to_copy = 1;
3384 }
3385 }
3386
3387exit_lbl:
3388 if (unlikely(to_copy))
3389 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3390 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3391 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3392 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3393
3394 return to_copy;
3395}
3396#endif
3397
91226790
DK
3398static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3399 u32 xmit_type)
f2e0899f 3400{
a848ade4
DK
3401 struct ipv6hdr *ipv6;
3402
2297a2da
VZ
3403 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3404 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3405 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3406
3407 if (xmit_type & XMIT_GSO_ENC_V6)
3408 ipv6 = inner_ipv6_hdr(skb);
3409 else if (xmit_type & XMIT_GSO_V6)
3410 ipv6 = ipv6_hdr(skb);
3411 else
3412 ipv6 = NULL;
3413
3414 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3415 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3416}
3417
3418/**
e8920674 3419 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3420 *
e8920674
DK
3421 * @skb: packet skb
3422 * @pbd: parse BD
3423 * @xmit_type: xmit flags
f2e0899f 3424 */
91226790
DK
3425static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3426 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3427 struct eth_tx_start_bd *tx_start_bd,
91226790 3428 u32 xmit_type)
f2e0899f
DK
3429{
3430 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3431 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3432 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3433
3434 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3435 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3436 pbd->tcp_pseudo_csum =
86564c3f
YM
3437 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3438 ip_hdr(skb)->daddr,
3439 0, IPPROTO_TCP, 0));
f2e0899f 3440
057cf65e
YM
3441 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3442 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3443 } else {
f2e0899f 3444 pbd->tcp_pseudo_csum =
86564c3f
YM
3445 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3446 &ipv6_hdr(skb)->daddr,
3447 0, IPPROTO_TCP, 0));
057cf65e 3448 }
f2e0899f 3449
86564c3f
YM
3450 pbd->global_data |=
3451 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3452}
f85582f8 3453
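What gets stored in pbd->tcp_pseudo_csum above is, conceptually, the TCP pseudo-header checksum computed with the length field forced to zero, so the firmware can add each segment's real length when it carves the LSO packet into MSS-sized frames. The sketch below shows only that arithmetic for IPv4; the complement and byte-swap details handled by csum_tcpudp_magic() and bswab16() in the driver are deliberately glossed over, and the helper name is made up.

#include <stdint.h>

/* Ones-complement sum of the IPv4 pseudo-header (saddr, daddr, proto) with
 * the length taken as zero, folded to 16 bits. Addresses are expected in
 * host byte order here purely for readability.
 */
static uint16_t pseudo_hdr_sum_no_len(uint32_t saddr, uint32_t daddr,
				      uint8_t proto)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;			/* zero pad byte + protocol, length = 0 */

	while (sum >> 16)		/* fold carries (ones-complement add) */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}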
a848ade4
DK
3454/**
3455 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3456 *
3457 * @bp: driver handle
3458 * @skb: packet skb
3459 * @parsing_data: data to be updated
3460 * @xmit_type: xmit flags
3461 *
3462 * 57712/578xx related, when skb has encapsulation
3463 */
3464static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3465 u32 *parsing_data, u32 xmit_type)
3466{
3467 *parsing_data |=
3468 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3469 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3470 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3471
3472 if (xmit_type & XMIT_CSUM_TCP) {
3473 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3474 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3475 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3476
3477 return skb_inner_transport_header(skb) +
3478 inner_tcp_hdrlen(skb) - skb->data;
3479 }
3480
3481 /* We support checksum offload for TCP and UDP only.
3482 * No need to pass the UDP header length - it's a constant.
3483 */
3484 return skb_inner_transport_header(skb) +
3485 sizeof(struct udphdr) - skb->data;
3486}
3487
f2e0899f 3488/**
e8920674 3489 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3490 *
e8920674
DK
3491 * @bp: driver handle
3492 * @skb: packet skb
3493 * @parsing_data: data to be updated
3494 * @xmit_type: xmit flags
f2e0899f 3495 *
91226790 3496 * 57712/578xx related
f2e0899f 3497 */
91226790
DK
3498static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3499 u32 *parsing_data, u32 xmit_type)
f2e0899f 3500{
e39aece7 3501 *parsing_data |=
2de67439 3502 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3503 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3504 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3505
e39aece7
VZ
3506 if (xmit_type & XMIT_CSUM_TCP) {
3507 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3508 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3509 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3510
e39aece7 3511 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3512 }
3513 /* We support checksum offload for TCP and UDP only.
3514 * No need to pass the UDP header length - it's a constant.
3515 */
3516 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3517}
3518
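A recurring source of confusion in these helpers is the unit each field uses: offsets written into the parsing BD are in 16-bit words (hence the >> 1), while the TCP header length is in 32-bit dwords (hence the / 4). A throwaway example with plain numbers, assuming an untagged IPv4/TCP frame with no options:

#include <stdio.h>

int main(void)
{
	unsigned int l4_start_bytes = 14 + 20;	/* Ethernet + IPv4 header */
	unsigned int tcp_hdr_bytes = 20;	/* TCP header, no options */

	printf("L4 start offset: %u words\n", l4_start_bytes >> 1);	/* 17 */
	printf("TCP header len : %u dwords\n", tcp_hdr_bytes / 4);	/* 5 */
	return 0;
}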
a848ade4 3519/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3520static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3521 struct eth_tx_start_bd *tx_start_bd,
3522 u32 xmit_type)
93ef5c02 3523{
93ef5c02
DK
3524 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3525
a848ade4 3526 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3527 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3528
3529 if (!(xmit_type & XMIT_CSUM_TCP))
3530 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3531}
3532
f2e0899f 3533/**
e8920674 3534 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3535 *
e8920674
DK
3536 * @bp: driver handle
3537 * @skb: packet skb
3538 * @pbd: parse BD to be updated
3539 * @xmit_type: xmit flags
f2e0899f 3540 */
91226790
DK
3541static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3542 struct eth_tx_parse_bd_e1x *pbd,
3543 u32 xmit_type)
f2e0899f 3544{
e39aece7 3545 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3546
3547 /* for now NS flag is not used in Linux */
3548 pbd->global_data =
86564c3f
YM
3549 cpu_to_le16(hlen |
3550 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3551 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3552
3553 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3554 skb_network_header(skb)) >> 1;
f2e0899f 3555
e39aece7
VZ
3556 hlen += pbd->ip_hlen_w;
3557
3558 /* We support checksum offload for TCP and UDP only */
3559 if (xmit_type & XMIT_CSUM_TCP)
3560 hlen += tcp_hdrlen(skb) / 2;
3561 else
3562 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3563
3564 pbd->total_hlen_w = cpu_to_le16(hlen);
3565 hlen = hlen*2;
3566
3567 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3568 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3569
3570 } else {
3571 s8 fix = SKB_CS_OFF(skb); /* signed! */
3572
3573 DP(NETIF_MSG_TX_QUEUED,
3574 "hlen %d fix %d csum before fix %x\n",
3575 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3576
3577 /* HW bug: fixup the CSUM */
3578 pbd->tcp_pseudo_csum =
3579 bnx2x_csum_fix(skb_transport_header(skb),
3580 SKB_CS(skb), fix);
3581
3582 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3583 pbd->tcp_pseudo_csum);
3584 }
3585
3586 return hlen;
3587}
f85582f8 3588
a848ade4
DK
3589static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3590 struct eth_tx_parse_bd_e2 *pbd_e2,
3591 struct eth_tx_parse_2nd_bd *pbd2,
3592 u16 *global_data,
3593 u32 xmit_type)
3594{
e287a75c 3595 u16 hlen_w = 0;
a848ade4 3596 u8 outerip_off, outerip_len = 0;
e768fb29 3597
e287a75c
DK
3598 /* from outer IP to transport */
3599 hlen_w = (skb_inner_transport_header(skb) -
3600 skb_network_header(skb)) >> 1;
a848ade4
DK
3601
3602 /* transport len */
e768fb29 3603 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3604
e287a75c 3605 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3606
e768fb29
DK
3607 /* outer IP header info */
3608 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3609 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3610 u32 csum = (__force u32)(~iph->check) -
3611 (__force u32)iph->tot_len -
3612 (__force u32)iph->frag_off;
c957d09f 3613
a848ade4 3614 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3615 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3616 } else {
3617 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3618 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3619 }
3620
3621 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3622
3623 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3624
3625 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3626 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3627
3628 pbd_e2->data.tunnel_data.pseudo_csum =
3629 bswab16(~csum_tcpudp_magic(
3630 inner_ip_hdr(skb)->saddr,
3631 inner_ip_hdr(skb)->daddr,
3632 0, IPPROTO_TCP, 0));
3633
3634 outerip_len = ip_hdr(skb)->ihl << 1;
3635 } else {
3636 pbd_e2->data.tunnel_data.pseudo_csum =
3637 bswab16(~csum_ipv6_magic(
3638 &inner_ipv6_hdr(skb)->saddr,
3639 &inner_ipv6_hdr(skb)->daddr,
3640 0, IPPROTO_TCP, 0));
3641 }
3642
3643 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3644
3645 *global_data |=
3646 outerip_off |
3647 (!!(xmit_type & XMIT_CSUM_V6) <<
3648 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3649 (outerip_len <<
3650 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3651 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3652 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3653
3654 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3655 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3656 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3657 }
a848ade4
DK
3658}
3659
9f6c9258
DK
3660/* called with netif_tx_lock
3661 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3662 * netif_wake_queue()
3663 */
3664netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3665{
3666 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3667
9f6c9258 3668 struct netdev_queue *txq;
6383c0b3 3669 struct bnx2x_fp_txdata *txdata;
9f6c9258 3670 struct sw_tx_bd *tx_buf;
619c5cb6 3671 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3672 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3673 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3674 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3675 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3676 u32 pbd_e2_parsing_data = 0;
9f6c9258 3677 u16 pkt_prod, bd_prod;
65565884 3678 int nbd, txq_index;
9f6c9258
DK
3679 dma_addr_t mapping;
3680 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3681 int i;
3682 u8 hlen = 0;
3683 __le16 pkt_size = 0;
3684 struct ethhdr *eth;
3685 u8 mac_type = UNICAST_ADDRESS;
3686
3687#ifdef BNX2X_STOP_ON_ERROR
3688 if (unlikely(bp->panic))
3689 return NETDEV_TX_BUSY;
3690#endif
3691
6383c0b3
AE
3692 txq_index = skb_get_queue_mapping(skb);
3693 txq = netdev_get_tx_queue(dev, txq_index);
3694
55c11941 3695 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3696
65565884 3697 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3698
3699 /* enable this debug print to view the transmission queue being used
51c1a580 3700 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3701 txq_index, fp_index, txdata_index); */
9f6c9258 3702
16a5fd92 3703 /* enable this debug print to view the transmission details
51c1a580
MS
3704 DP(NETIF_MSG_TX_QUEUED,
3705 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3706 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3707
6383c0b3 3708 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3709 skb_shinfo(skb)->nr_frags +
3710 BDS_PER_TX_PKT +
3711 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3712 /* Handle special storage cases separately */
c96bdc0c
DK
3713 if (txdata->tx_ring_size == 0) {
3714 struct bnx2x_eth_q_stats *q_stats =
3715 bnx2x_fp_qstats(bp, txdata->parent_fp);
3716 q_stats->driver_filtered_tx_pkt++;
3717 dev_kfree_skb(skb);
3718 return NETDEV_TX_OK;
3719 }
2de67439
YM
3720 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3721 netif_tx_stop_queue(txq);
c96bdc0c 3722 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3723
9f6c9258
DK
3724 return NETDEV_TX_BUSY;
3725 }
3726
51c1a580 3727 DP(NETIF_MSG_TX_QUEUED,
04c46736 3728 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3729 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3730 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3731 skb->len);
9f6c9258
DK
3732
3733 eth = (struct ethhdr *)skb->data;
3734
3735 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3736 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3737 if (is_broadcast_ether_addr(eth->h_dest))
3738 mac_type = BROADCAST_ADDRESS;
3739 else
3740 mac_type = MULTICAST_ADDRESS;
3741 }
3742
91226790 3743#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3744 /* First, check if we need to linearize the skb (due to FW
3745 restrictions). No need to check fragmentation if page size > 8K
3746 (there will be no violation to FW restrictions) */
3747 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3748 /* Statistics of linearization */
3749 bp->lin_cnt++;
3750 if (skb_linearize(skb) != 0) {
51c1a580
MS
3751 DP(NETIF_MSG_TX_QUEUED,
3752 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3753 dev_kfree_skb_any(skb);
3754 return NETDEV_TX_OK;
3755 }
3756 }
3757#endif
619c5cb6
VZ
3758 /* Map skb linear data for DMA */
3759 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3760 skb_headlen(skb), DMA_TO_DEVICE);
3761 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3762 DP(NETIF_MSG_TX_QUEUED,
3763 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3764 dev_kfree_skb_any(skb);
3765 return NETDEV_TX_OK;
3766 }
9f6c9258
DK
3767 /*
3768 Please read carefully. First we use one BD which we mark as start,
3769 then we have a parsing info BD (used for TSO or xsum),
3770 and only then we have the rest of the TSO BDs.
3771 (don't forget to mark the last one as last,
3772 and to unmap only AFTER you write to the BD ...)
3773 And above all, all PBD sizes are in words - NOT DWORDS!
3774 */
3775
619c5cb6
VZ
3776 /* get current pkt produced now - advance it just before sending packet
3777 * since mapping of pages may fail and cause packet to be dropped
3778 */
6383c0b3
AE
3779 pkt_prod = txdata->tx_pkt_prod;
3780 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3781
619c5cb6
VZ
3782 /* get a tx_buf and first BD
3783 * tx_start_bd may be changed during SPLIT,
3784 * but first_bd will always stay first
3785 */
6383c0b3
AE
3786 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3787 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3788 first_bd = tx_start_bd;
9f6c9258
DK
3789
3790 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3791
91226790
DK
3792 /* header nbd: indirectly zero other flags! */
3793 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3794
3795 /* remember the first BD of the packet */
6383c0b3 3796 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3797 tx_buf->skb = skb;
3798 tx_buf->flags = 0;
3799
3800 DP(NETIF_MSG_TX_QUEUED,
3801 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3802 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3803
eab6d18d 3804 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3805 tx_start_bd->vlan_or_ethertype =
3806 cpu_to_le16(vlan_tx_tag_get(skb));
3807 tx_start_bd->bd_flags.as_bitfield |=
3808 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3809 } else {
3810 /* when transmitting in a vf, start bd must hold the ethertype
3811 * for fw to enforce it
3812 */
91226790 3813 if (IS_VF(bp))
dc1ba591
AE
3814 tx_start_bd->vlan_or_ethertype =
3815 cpu_to_le16(ntohs(eth->h_proto));
91226790 3816 else
dc1ba591
AE
3817 /* used by FW for packet accounting */
3818 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3819 }
9f6c9258 3820
91226790
DK
3821 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3822
9f6c9258
DK
3823 /* turn on parsing and get a BD */
3824 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3825
93ef5c02
DK
3826 if (xmit_type & XMIT_CSUM)
3827 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3828
619c5cb6 3829 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3830 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3831 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3832
3833 if (xmit_type & XMIT_CSUM_ENC) {
3834 u16 global_data = 0;
3835
3836 /* Set PBD in enc checksum offload case */
3837 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3838 &pbd_e2_parsing_data,
3839 xmit_type);
3840
3841 /* turn on 2nd parsing and get a BD */
3842 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3843
3844 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3845
3846 memset(pbd2, 0, sizeof(*pbd2));
3847
3848 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3849 (skb_inner_network_header(skb) -
3850 skb->data) >> 1;
3851
3852 if (xmit_type & XMIT_GSO_ENC)
3853 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3854 &global_data,
3855 xmit_type);
3856
3857 pbd2->global_data = cpu_to_le16(global_data);
3858
3859 /* add an additional parsing BD indication to the start BD */
3860 SET_FLAG(tx_start_bd->general_data,
3861 ETH_TX_START_BD_PARSE_NBDS, 1);
3862 /* set encapsulation flag in start BD */
3863 SET_FLAG(tx_start_bd->general_data,
3864 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3865 nbd++;
3866 } else if (xmit_type & XMIT_CSUM) {
91226790 3867 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3868 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3869 &pbd_e2_parsing_data,
3870 xmit_type);
a848ade4 3871 }
dc1ba591 3872
91226790
DK
3873 /* Add the MACs to the parsing BD if this is a VF */
3874 if (IS_VF(bp)) {
3875 /* override GRE parameters in BD */
3876 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3877 &pbd_e2->data.mac_addr.src_mid,
3878 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3879 eth->h_source);
91226790
DK
3880
3881 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3882 &pbd_e2->data.mac_addr.dst_mid,
3883 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3884 eth->h_dest);
3885 }
96bed4b9
YM
3886
3887 SET_FLAG(pbd_e2_parsing_data,
3888 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3889 } else {
96bed4b9 3890 u16 global_data = 0;
6383c0b3 3891 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3892 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3893 /* Set PBD in checksum offload case */
3894 if (xmit_type & XMIT_CSUM)
3895 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3896
96bed4b9
YM
3897 SET_FLAG(global_data,
3898 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3899 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3900 }
3901
f85582f8 3902 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3903 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3904 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3905 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3906 pkt_size = tx_start_bd->nbytes;
3907
51c1a580 3908 DP(NETIF_MSG_TX_QUEUED,
91226790 3909 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3910 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3911 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3912 tx_start_bd->bd_flags.as_bitfield,
3913 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3914
3915 if (xmit_type & XMIT_GSO) {
3916
3917 DP(NETIF_MSG_TX_QUEUED,
3918 "TSO packet len %d hlen %d total len %d tso size %d\n",
3919 skb->len, hlen, skb_headlen(skb),
3920 skb_shinfo(skb)->gso_size);
3921
3922 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3923
91226790
DK
3924 if (unlikely(skb_headlen(skb) > hlen)) {
3925 nbd++;
6383c0b3
AE
3926 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3927 &tx_start_bd, hlen,
91226790
DK
3928 bd_prod);
3929 }
619c5cb6 3930 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3931 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3932 xmit_type);
f2e0899f 3933 else
44dbc78e 3934 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3935 }
2297a2da
VZ
3936
3937 /* Set the PBD's parsing_data field if not zero
3938 * (for the chips newer than 57711).
3939 */
3940 if (pbd_e2_parsing_data)
3941 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3942
9f6c9258
DK
3943 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3944
f85582f8 3945 /* Handle fragmented skb */
9f6c9258
DK
3946 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3947 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3948
9e903e08
ED
3949 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3950 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3951 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3952 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3953
51c1a580
MS
3954 DP(NETIF_MSG_TX_QUEUED,
3955 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3956
3957 /* we need to unmap all buffers already mapped
3958 * for this SKB;
3959 * first_bd->nbd needs to be properly updated
3960 * before calling bnx2x_free_tx_pkt
3961 */
3962 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3963 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3964 TX_BD(txdata->tx_pkt_prod),
3965 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3966 return NETDEV_TX_OK;
3967 }
3968
9f6c9258 3969 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3970 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3971 if (total_pkt_bd == NULL)
6383c0b3 3972 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3973
9f6c9258
DK
3974 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3975 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3976 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3977 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3978 nbd++;
9f6c9258
DK
3979
3980 DP(NETIF_MSG_TX_QUEUED,
3981 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3982 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3983 le16_to_cpu(tx_data_bd->nbytes));
3984 }
3985
3986 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3987
619c5cb6
VZ
3988 /* update with actual num BDs */
3989 first_bd->nbd = cpu_to_le16(nbd);
3990
9f6c9258
DK
3991 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3992
3993 /* now send a tx doorbell, counting the next BD
3994 * if the packet contains or ends with it
3995 */
3996 if (TX_BD_POFF(bd_prod) < nbd)
3997 nbd++;
3998
619c5cb6
VZ
3999 /* total_pkt_bytes should be set on the first data BD if
4000 * it's not an LSO packet and there is more than one
4001 * data BD. In this case pkt_size is limited by an MTU value.
4002 * However we prefer to set it for an LSO packet (while we don't
4003 * have to) in order to save some CPU cycles in the non-LSO
4004 * case, where we care much more about them.
4005 */
9f6c9258
DK
4006 if (total_pkt_bd != NULL)
4007 total_pkt_bd->total_pkt_bytes = pkt_size;
4008
523224a3 4009 if (pbd_e1x)
9f6c9258 4010 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4011 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4012 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4013 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4014 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4015 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4016 if (pbd_e2)
4017 DP(NETIF_MSG_TX_QUEUED,
4018 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4019 pbd_e2,
4020 pbd_e2->data.mac_addr.dst_hi,
4021 pbd_e2->data.mac_addr.dst_mid,
4022 pbd_e2->data.mac_addr.dst_lo,
4023 pbd_e2->data.mac_addr.src_hi,
4024 pbd_e2->data.mac_addr.src_mid,
4025 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4026 pbd_e2->parsing_data);
9f6c9258
DK
4027 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4028
2df1a70a
TH
4029 netdev_tx_sent_queue(txq, skb->len);
4030
8373c57d
WB
4031 skb_tx_timestamp(skb);
4032
6383c0b3 4033 txdata->tx_pkt_prod++;
9f6c9258
DK
4034 /*
4035 * Make sure that the BD data is updated before updating the producer
4036 * since FW might read the BD right after the producer is updated.
4037 * This is only applicable for weak-ordered memory model archs such
4038 * as IA-64. The following barrier is also mandatory since the FW
4039 * assumes packets must have BDs.
4040 */
4041 wmb();
4042
6383c0b3 4043 txdata->tx_db.data.prod += nbd;
9f6c9258 4044 barrier();
f85582f8 4045
6383c0b3 4046 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4047
4048 mmiowb();
4049
6383c0b3 4050 txdata->tx_bd_prod += nbd;
9f6c9258 4051
7df2dc6b 4052 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4053 netif_tx_stop_queue(txq);
4054
4055 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4056 * ordering of set_bit() in netif_tx_stop_queue() and read of
4057 * fp->bd_tx_cons */
4058 smp_mb();
4059
15192a8c 4060 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4061 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4062 netif_tx_wake_queue(txq);
4063 }
6383c0b3 4064 txdata->tx_pkt++;
9f6c9258
DK
4065
4066 return NETDEV_TX_OK;
4067}
f85582f8 4068
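The tail of bnx2x_start_xmit() above uses the classic stop/re-check/wake dance: stop the queue when descriptors run low, issue a full barrier, then re-check availability so a completion that freed descriptors in the meantime cannot leave the queue stopped forever. A stripped-down model of that pattern in plain C11 atomics (not bnx2x code; all names invented):

#include <stdatomic.h>
#include <stdbool.h>

struct toy_txring {
	atomic_uint prod, cons;		/* producer / consumer BD indices */
	unsigned int size;
	atomic_bool stopped;		/* stands in for the netif queue state */
};

static unsigned int toy_tx_avail(struct toy_txring *r)
{
	return r->size - (atomic_load(&r->prod) - atomic_load(&r->cons));
}

/* Producer side, mirroring the end of the xmit path. */
static void toy_maybe_stop(struct toy_txring *r, unsigned int need)
{
	if (toy_tx_avail(r) >= need)
		return;

	atomic_store(&r->stopped, true);		/* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */

	if (toy_tx_avail(r) >= need)			/* a completion raced us */
		atomic_store(&r->stopped, false);	/* netif_tx_wake_queue() */
}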
6383c0b3
AE
4069/**
4070 * bnx2x_setup_tc - routine to configure net_device for multi tc
4071 *
4072 * @netdev: net device to configure
4073 * @tc: number of traffic classes to enable
4074 *
4075 * callback connected to the ndo_setup_tc function pointer
4076 */
4077int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4078{
4079 int cos, prio, count, offset;
4080 struct bnx2x *bp = netdev_priv(dev);
4081
4082 /* setup tc must be called under rtnl lock */
4083 ASSERT_RTNL();
4084
16a5fd92 4085 /* no traffic classes requested. Aborting */
6383c0b3
AE
4086 if (!num_tc) {
4087 netdev_reset_tc(dev);
4088 return 0;
4089 }
4090
4091 /* requested to support too many traffic classes */
4092 if (num_tc > bp->max_cos) {
6bf07b8e 4093 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4094 num_tc, bp->max_cos);
6383c0b3
AE
4095 return -EINVAL;
4096 }
4097
4098 /* declare amount of supported traffic classes */
4099 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4100 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4101 return -EINVAL;
4102 }
4103
4104 /* configure priority to traffic class mapping */
4105 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4106 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4107 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4108 "mapping priority %d to tc %d\n",
6383c0b3
AE
4109 prio, bp->prio_to_cos[prio]);
4110 }
4111
16a5fd92 4112 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4113 This can be used for ETS or PFC, and saves the effort of setting
4114 up a multi-class queue disc or negotiating DCBX with a switch
4115 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4116 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4117 for (prio = 1; prio < 16; prio++) {
4118 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4119 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4120 } */
4121
4122 /* configure traffic class to transmission queue mapping */
4123 for (cos = 0; cos < bp->max_cos; cos++) {
4124 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4125 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4126 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4127 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4128 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4129 cos, offset, count);
4130 }
4131
4132 return 0;
4133}
4134
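The queue blocks handed out by netdev_set_tc_queue() above are just consecutive slices of the ETH queues, one slice per CoS. With made-up numbers (4 RSS queues, 3 traffic classes) the mapping works out as follows:

#include <stdio.h>

int main(void)
{
	int num_eth_queues = 4, max_cos = 3;

	for (int cos = 0; cos < max_cos; cos++)
		printf("tc %d -> queues [%d..%d]\n", cos,
		       cos * num_eth_queues,
		       cos * num_eth_queues + num_eth_queues - 1);

	return 0;	/* tc 0 -> [0..3], tc 1 -> [4..7], tc 2 -> [8..11] */
}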
9f6c9258
DK
4135/* called with rtnl_lock */
4136int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4137{
4138 struct sockaddr *addr = p;
4139 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4140 int rc = 0;
9f6c9258 4141
51c1a580
MS
4142 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4143 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4144 return -EINVAL;
51c1a580 4145 }
614c76df 4146
a3348722
BW
4147 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4148 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4149 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4150 return -EINVAL;
51c1a580 4151 }
9f6c9258 4152
619c5cb6
VZ
4153 if (netif_running(dev)) {
4154 rc = bnx2x_set_eth_mac(bp, false);
4155 if (rc)
4156 return rc;
4157 }
4158
9f6c9258 4159 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4160
523224a3 4161 if (netif_running(dev))
619c5cb6 4162 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4163
619c5cb6 4164 return rc;
9f6c9258
DK
4165}
4166
b3b83c3f
DK
4167static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4168{
4169 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4170 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4171 u8 cos;
b3b83c3f
DK
4172
4173 /* Common */
55c11941 4174
b3b83c3f
DK
4175 if (IS_FCOE_IDX(fp_index)) {
4176 memset(sb, 0, sizeof(union host_hc_status_block));
4177 fp->status_blk_mapping = 0;
b3b83c3f 4178 } else {
b3b83c3f 4179 /* status blocks */
619c5cb6 4180 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4181 BNX2X_PCI_FREE(sb->e2_sb,
4182 bnx2x_fp(bp, fp_index,
4183 status_blk_mapping),
4184 sizeof(struct host_hc_status_block_e2));
4185 else
4186 BNX2X_PCI_FREE(sb->e1x_sb,
4187 bnx2x_fp(bp, fp_index,
4188 status_blk_mapping),
4189 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4190 }
55c11941 4191
b3b83c3f
DK
4192 /* Rx */
4193 if (!skip_rx_queue(bp, fp_index)) {
4194 bnx2x_free_rx_bds(fp);
4195
4196 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4197 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4198 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4199 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4200 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4201
4202 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4203 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4204 sizeof(struct eth_fast_path_rx_cqe) *
4205 NUM_RCQ_BD);
4206
4207 /* SGE ring */
4208 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4209 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4210 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4211 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4212 }
4213
4214 /* Tx */
4215 if (!skip_tx_queue(bp, fp_index)) {
4216 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4217 for_each_cos_in_tx_queue(fp, cos) {
65565884 4218 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4219
51c1a580 4220 DP(NETIF_MSG_IFDOWN,
94f05b0f 4221 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4222 fp_index, cos, txdata->cid);
4223
4224 BNX2X_FREE(txdata->tx_buf_ring);
4225 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4226 txdata->tx_desc_mapping,
4227 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4228 }
b3b83c3f
DK
4229 }
4230 /* end of fastpath */
4231}
4232
a8f47eb7 4233static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4234{
4235 int i;
4236 for_each_cnic_queue(bp, i)
4237 bnx2x_free_fp_mem_at(bp, i);
4238}
4239
b3b83c3f
DK
4240void bnx2x_free_fp_mem(struct bnx2x *bp)
4241{
4242 int i;
55c11941 4243 for_each_eth_queue(bp, i)
b3b83c3f
DK
4244 bnx2x_free_fp_mem_at(bp, i);
4245}
4246
1191cb83 4247static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4248{
4249 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4250 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4251 bnx2x_fp(bp, index, sb_index_values) =
4252 (__le16 *)status_blk.e2_sb->sb.index_values;
4253 bnx2x_fp(bp, index, sb_running_index) =
4254 (__le16 *)status_blk.e2_sb->sb.running_index;
4255 } else {
4256 bnx2x_fp(bp, index, sb_index_values) =
4257 (__le16 *)status_blk.e1x_sb->sb.index_values;
4258 bnx2x_fp(bp, index, sb_running_index) =
4259 (__le16 *)status_blk.e1x_sb->sb.running_index;
4260 }
4261}
4262
1191cb83
ED
4263/* Returns the number of actually allocated BDs */
4264static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4265 int rx_ring_size)
4266{
4267 struct bnx2x *bp = fp->bp;
4268 u16 ring_prod, cqe_ring_prod;
4269 int i, failure_cnt = 0;
4270
4271 fp->rx_comp_cons = 0;
4272 cqe_ring_prod = ring_prod = 0;
4273
4274 /* This routine is called only during init, so
4275 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4276 */
4277 for (i = 0; i < rx_ring_size; i++) {
996dedba 4278 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4279 failure_cnt++;
4280 continue;
4281 }
4282 ring_prod = NEXT_RX_IDX(ring_prod);
4283 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4284 WARN_ON(ring_prod <= (i - failure_cnt));
4285 }
4286
4287 if (failure_cnt)
4288 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4289 i - failure_cnt, fp->index);
4290
4291 fp->rx_bd_prod = ring_prod;
4292 /* Limit the CQE producer by the CQE ring size */
4293 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4294 cqe_ring_prod);
4295 fp->rx_pkt = fp->rx_calls = 0;
4296
15192a8c 4297 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4298
4299 return i - failure_cnt;
4300}
4301
4302static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4303{
4304 int i;
4305
4306 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4307 struct eth_rx_cqe_next_page *nextpg;
4308
4309 nextpg = (struct eth_rx_cqe_next_page *)
4310 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4311 nextpg->addr_hi =
4312 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4313 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4314 nextpg->addr_lo =
4315 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4316 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4317 }
4318}
4319
b3b83c3f
DK
4320static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4321{
4322 union host_hc_status_block *sb;
4323 struct bnx2x_fastpath *fp = &bp->fp[index];
4324 int ring_size = 0;
6383c0b3 4325 u8 cos;
c2188952 4326 int rx_ring_size = 0;
b3b83c3f 4327
a3348722
BW
4328 if (!bp->rx_ring_size &&
4329 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4330 rx_ring_size = MIN_RX_SIZE_NONTPA;
4331 bp->rx_ring_size = rx_ring_size;
55c11941 4332 } else if (!bp->rx_ring_size) {
c2188952
VZ
4333 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4334
065f8b92
YM
4335 if (CHIP_IS_E3(bp)) {
4336 u32 cfg = SHMEM_RD(bp,
4337 dev_info.port_hw_config[BP_PORT(bp)].
4338 default_cfg);
4339
4340 /* Decrease ring size for 1G functions */
4341 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4342 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4343 rx_ring_size /= 10;
4344 }
d760fc37 4345
c2188952
VZ
4346 /* allocate at least number of buffers required by FW */
4347 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4348 MIN_RX_SIZE_TPA, rx_ring_size);
4349
4350 bp->rx_ring_size = rx_ring_size;
614c76df 4351 } else /* if rx_ring_size specified - use it */
c2188952 4352 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4353
04c46736
YM
4354 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4355
b3b83c3f
DK
4356 /* Common */
4357 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4358
b3b83c3f 4359 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4360 /* status blocks */
cd2b0389
JP
4361 if (!CHIP_IS_E1x(bp)) {
4362 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4363 sizeof(struct host_hc_status_block_e2));
4364 if (!sb->e2_sb)
4365 goto alloc_mem_err;
4366 } else {
4367 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4368 sizeof(struct host_hc_status_block_e1x));
4369 if (!sb->e1x_sb)
4370 goto alloc_mem_err;
4371 }
b3b83c3f 4372 }
8eef2af1
DK
4373
4374 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4375 * set shortcuts for it.
4376 */
4377 if (!IS_FCOE_IDX(index))
4378 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4379
4380 /* Tx */
4381 if (!skip_tx_queue(bp, index)) {
4382 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4383 for_each_cos_in_tx_queue(fp, cos) {
65565884 4384 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4385
51c1a580
MS
4386 DP(NETIF_MSG_IFUP,
4387 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4388 index, cos);
4389
cd2b0389
JP
4390 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4391 sizeof(struct sw_tx_bd),
4392 GFP_KERNEL);
4393 if (!txdata->tx_buf_ring)
4394 goto alloc_mem_err;
4395 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4396 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4397 if (!txdata->tx_desc_ring)
4398 goto alloc_mem_err;
6383c0b3 4399 }
b3b83c3f
DK
4400 }
4401
4402 /* Rx */
4403 if (!skip_rx_queue(bp, index)) {
4404 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4405 bnx2x_fp(bp, index, rx_buf_ring) =
4406 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4407 if (!bnx2x_fp(bp, index, rx_buf_ring))
4408 goto alloc_mem_err;
4409 bnx2x_fp(bp, index, rx_desc_ring) =
4410 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4411 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4412 if (!bnx2x_fp(bp, index, rx_desc_ring))
4413 goto alloc_mem_err;
b3b83c3f 4414
75b29459 4415 /* Seed all CQEs by 1s */
cd2b0389
JP
4416 bnx2x_fp(bp, index, rx_comp_ring) =
4417 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4418 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4419 if (!bnx2x_fp(bp, index, rx_comp_ring))
4420 goto alloc_mem_err;
b3b83c3f
DK
4421
4422 /* SGE ring */
cd2b0389
JP
4423 bnx2x_fp(bp, index, rx_page_ring) =
4424 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4425 GFP_KERNEL);
4426 if (!bnx2x_fp(bp, index, rx_page_ring))
4427 goto alloc_mem_err;
4428 bnx2x_fp(bp, index, rx_sge_ring) =
4429 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4430 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4431 if (!bnx2x_fp(bp, index, rx_sge_ring))
4432 goto alloc_mem_err;
b3b83c3f
DK
4433 /* RX BD ring */
4434 bnx2x_set_next_page_rx_bd(fp);
4435
4436 /* CQ ring */
4437 bnx2x_set_next_page_rx_cq(fp);
4438
4439 /* BDs */
4440 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4441 if (ring_size < rx_ring_size)
4442 goto alloc_mem_err;
4443 }
4444
4445 return 0;
4446
4447/* handles low memory cases */
4448alloc_mem_err:
4449 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4450 index, ring_size);
4451 /* FW will drop all packets if the queue is not big enough;
4452 * in that case we disable the queue
6383c0b3 4453 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4454 */
4455 if (ring_size < (fp->disable_tpa ?
eb722d7a 4456 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4457 /* release memory allocated for this queue */
4458 bnx2x_free_fp_mem_at(bp, index);
4459 return -ENOMEM;
4460 }
4461 return 0;
4462}
4463
a8f47eb7 4464static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4465{
4466 if (!NO_FCOE(bp))
4467 /* FCoE */
4468 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4469 /* we will fail load process instead of mark
4470 * NO_FCOE_FLAG
4471 */
4472 return -ENOMEM;
4473
4474 return 0;
4475}
4476
a8f47eb7 4477static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4478{
4479 int i;
4480
55c11941
MS
4481 /* 1. Allocate FP for leading - fatal if error
4482 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4483 */
4484
4485 /* leading */
4486 if (bnx2x_alloc_fp_mem_at(bp, 0))
4487 return -ENOMEM;
6383c0b3 4488
b3b83c3f
DK
4489 /* RSS */
4490 for_each_nondefault_eth_queue(bp, i)
4491 if (bnx2x_alloc_fp_mem_at(bp, i))
4492 break;
4493
4494 /* handle memory failures */
4495 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4496 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4497
4498 WARN_ON(delta < 0);
4864a16a 4499 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4500 if (CNIC_SUPPORT(bp))
4501 /* move non eth FPs next to last eth FP
4502 * must be done in that order
4503 * FCOE_IDX < FWD_IDX < OOO_IDX
4504 */
b3b83c3f 4505
55c11941
MS
4506 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4507 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4508 bp->num_ethernet_queues -= delta;
4509 bp->num_queues = bp->num_ethernet_queues +
4510 bp->num_cnic_queues;
b3b83c3f
DK
4511 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4512 bp->num_queues + delta, bp->num_queues);
4513 }
4514
4515 return 0;
4516}
d6214d7a 4517
523224a3
DK
4518void bnx2x_free_mem_bp(struct bnx2x *bp)
4519{
c3146eb6
DK
4520 int i;
4521
4522 for (i = 0; i < bp->fp_array_size; i++)
4523 kfree(bp->fp[i].tpa_info);
523224a3 4524 kfree(bp->fp);
15192a8c
BW
4525 kfree(bp->sp_objs);
4526 kfree(bp->fp_stats);
65565884 4527 kfree(bp->bnx2x_txq);
523224a3
DK
4528 kfree(bp->msix_table);
4529 kfree(bp->ilt);
4530}
4531
0329aba1 4532int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4533{
4534 struct bnx2x_fastpath *fp;
4535 struct msix_entry *tbl;
4536 struct bnx2x_ilt *ilt;
6383c0b3 4537 int msix_table_size = 0;
55c11941 4538 int fp_array_size, txq_array_size;
15192a8c 4539 int i;
6383c0b3
AE
4540
4541 /*
4542 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4543 * path IGU SBs plus default SB (for PF only).
6383c0b3 4544 */
1ab4434c
AE
4545 msix_table_size = bp->igu_sb_cnt;
4546 if (IS_PF(bp))
4547 msix_table_size++;
4548 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4549
6383c0b3 4550 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4551 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4552 bp->fp_array_size = fp_array_size;
4553 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4554
c3146eb6 4555 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4556 if (!fp)
4557 goto alloc_err;
c3146eb6 4558 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4559 fp[i].tpa_info =
4560 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4561 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4562 if (!(fp[i].tpa_info))
4563 goto alloc_err;
4564 }
4565
523224a3
DK
4566 bp->fp = fp;
4567
15192a8c 4568 /* allocate sp objs */
c3146eb6 4569 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4570 GFP_KERNEL);
4571 if (!bp->sp_objs)
4572 goto alloc_err;
4573
4574 /* allocate fp_stats */
c3146eb6 4575 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4576 GFP_KERNEL);
4577 if (!bp->fp_stats)
4578 goto alloc_err;
4579
65565884 4580 /* Allocate memory for the transmission queues array */
55c11941
MS
4581 txq_array_size =
4582 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4583 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4584
4585 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4586 GFP_KERNEL);
65565884
MS
4587 if (!bp->bnx2x_txq)
4588 goto alloc_err;
4589
523224a3 4590 /* msix table */
01e23742 4591 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4592 if (!tbl)
4593 goto alloc_err;
4594 bp->msix_table = tbl;
4595
4596 /* ilt */
4597 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4598 if (!ilt)
4599 goto alloc_err;
4600 bp->ilt = ilt;
4601
4602 return 0;
4603alloc_err:
4604 bnx2x_free_mem_bp(bp);
4605 return -ENOMEM;
523224a3
DK
4606}
4607
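The sizes allocated above follow directly from the queue layout: one MSI-X entry per fastpath IGU status block plus one for the default SB on a PF, one fastpath per RSS queue plus one CNIC L2 queue, and one txdata per RSS queue per CoS plus the CNIC queue. A quick check with made-up counts:

#include <stdio.h>

int main(void)
{
	int igu_sb_cnt = 17, is_pf = 1;
	int max_rss = 16, cnic = 1, multi_tx_cos = 3;

	printf("msix table size: %d\n", igu_sb_cnt + (is_pf ? 1 : 0));	/* 18 */
	printf("fp array size  : %d\n", max_rss + cnic);		/* 17 */
	printf("txq array size : %d\n", max_rss * multi_tx_cos + cnic);	/* 49 */
	return 0;
}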
a9fccec7 4608int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4609{
4610 struct bnx2x *bp = netdev_priv(dev);
4611
4612 if (unlikely(!netif_running(dev)))
4613 return 0;
4614
5d07d868 4615 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4616 return bnx2x_nic_load(bp, LOAD_NORMAL);
4617}
4618
1ac9e428
YR
4619int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4620{
4621 u32 sel_phy_idx = 0;
4622 if (bp->link_params.num_phys <= 1)
4623 return INT_PHY;
4624
4625 if (bp->link_vars.link_up) {
4626 sel_phy_idx = EXT_PHY1;
4627 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4628 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4629 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4630 sel_phy_idx = EXT_PHY2;
4631 } else {
4632
4633 switch (bnx2x_phy_selection(&bp->link_params)) {
4634 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4635 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4636 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4637 sel_phy_idx = EXT_PHY1;
4638 break;
4639 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4640 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4641 sel_phy_idx = EXT_PHY2;
4642 break;
4643 }
4644 }
4645
4646 return sel_phy_idx;
1ac9e428
YR
4647}
4648int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4649{
4650 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4651 /*
2de67439 4652 * The selected activated PHY is always after swapping (in case PHY
1ac9e428
YR
4653 * swapping is enabled). So when swapping is enabled, we need to reverse
4654 * the configuration
4655 */
4656
4657 if (bp->link_params.multi_phy_config &
4658 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4659 if (sel_phy_idx == EXT_PHY1)
4660 sel_phy_idx = EXT_PHY2;
4661 else if (sel_phy_idx == EXT_PHY2)
4662 sel_phy_idx = EXT_PHY1;
4663 }
4664 return LINK_CONFIG_IDX(sel_phy_idx);
4665}
4666
55c11941 4667#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4668int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4669{
4670 struct bnx2x *bp = netdev_priv(dev);
4671 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4672
4673 switch (type) {
4674 case NETDEV_FCOE_WWNN:
4675 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4676 cp->fcoe_wwn_node_name_lo);
4677 break;
4678 case NETDEV_FCOE_WWPN:
4679 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4680 cp->fcoe_wwn_port_name_lo);
4681 break;
4682 default:
51c1a580 4683 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4684 return -EINVAL;
4685 }
4686
4687 return 0;
4688}
4689#endif
4690
9f6c9258
DK
4691/* called with rtnl_lock */
4692int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4693{
4694 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4695
4696 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4697 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4698 return -EAGAIN;
4699 }
4700
4701 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4702 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4703 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4704 return -EINVAL;
51c1a580 4705 }
9f6c9258
DK
4706
4707 /* This does not race with packet allocation
4708 * because the actual alloc size is
4709 * only updated as part of load
4710 */
4711 dev->mtu = new_mtu;
4712
66371c44
MM
4713 return bnx2x_reload_if_running(dev);
4714}
4715
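The MTU test above accepts anything that still yields a legal Ethernet frame: small enough for the jumbo limit, and large enough that the MTU plus the 14-byte header reaches the minimum frame size. A standalone version with the conventional values (the driver takes the real limits from its headers, so treat these numbers as assumptions):

#include <stdbool.h>

#define EX_ETH_HLEN			14
#define EX_ETH_MIN_PACKET_SIZE		60
#define EX_ETH_MAX_JUMBO_PACKET_SIZE	9600

static bool mtu_is_valid(int new_mtu)
{
	return new_mtu <= EX_ETH_MAX_JUMBO_PACKET_SIZE &&
	       new_mtu + EX_ETH_HLEN >= EX_ETH_MIN_PACKET_SIZE;
}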
c8f44aff 4716netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4717 netdev_features_t features)
66371c44
MM
4718{
4719 struct bnx2x *bp = netdev_priv(dev);
4720
4721 /* TPA requires Rx CSUM offloading */
621b4d66 4722 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4723 features &= ~NETIF_F_LRO;
621b4d66
DK
4724 features &= ~NETIF_F_GRO;
4725 }
66371c44
MM
4726
4727 return features;
4728}
4729
c8f44aff 4730int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4731{
4732 struct bnx2x *bp = netdev_priv(dev);
4733 u32 flags = bp->flags;
8802f579 4734 u32 changes;
538dd2e3 4735 bool bnx2x_reload = false;
66371c44
MM
4736
4737 if (features & NETIF_F_LRO)
4738 flags |= TPA_ENABLE_FLAG;
4739 else
4740 flags &= ~TPA_ENABLE_FLAG;
4741
621b4d66
DK
4742 if (features & NETIF_F_GRO)
4743 flags |= GRO_ENABLE_FLAG;
4744 else
4745 flags &= ~GRO_ENABLE_FLAG;
4746
538dd2e3
MB
4747 if (features & NETIF_F_LOOPBACK) {
4748 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4749 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4750 bnx2x_reload = true;
4751 }
4752 } else {
4753 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4754 bp->link_params.loopback_mode = LOOPBACK_NONE;
4755 bnx2x_reload = true;
4756 }
4757 }
4758
8802f579
ED
4759 changes = flags ^ bp->flags;
4760
16a5fd92 4761 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4762 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4763 changes &= ~GRO_ENABLE_FLAG;
4764
4765 if (changes)
538dd2e3 4766 bnx2x_reload = true;
8802f579
ED
4767
4768 bp->flags = flags;
66371c44 4769
538dd2e3 4770 if (bnx2x_reload) {
66371c44
MM
4771 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4772 return bnx2x_reload_if_running(dev);
4773 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4774 }
4775
66371c44 4776 return 0;
9f6c9258
DK
4777}
4778
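The reload decision above boils down to XOR-ing the old and new flag words and masking out a GRO toggle when TPA/LRO stays enabled. A toy model with invented flag values:

#include <stdio.h>

#define EX_TPA_ENABLE	0x1
#define EX_GRO_ENABLE	0x2

int main(void)
{
	unsigned int old_flags = EX_TPA_ENABLE;
	unsigned int new_flags = EX_TPA_ENABLE | EX_GRO_ENABLE;
	unsigned int changes = old_flags ^ new_flags;

	/* GRO toggled while TPA/LRO stays on: no reload required */
	if ((changes & EX_GRO_ENABLE) && (new_flags & EX_TPA_ENABLE))
		changes &= ~EX_GRO_ENABLE;

	printf("reload needed: %s\n", changes ? "yes" : "no");	/* no */
	return 0;
}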
4779void bnx2x_tx_timeout(struct net_device *dev)
4780{
4781 struct bnx2x *bp = netdev_priv(dev);
4782
4783#ifdef BNX2X_STOP_ON_ERROR
4784 if (!bp->panic)
4785 bnx2x_panic();
4786#endif
7be08a72 4787
9f6c9258 4788 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4789 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4790}
4791
9f6c9258
DK
4792int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4793{
4794 struct net_device *dev = pci_get_drvdata(pdev);
4795 struct bnx2x *bp;
4796
4797 if (!dev) {
4798 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4799 return -ENODEV;
4800 }
4801 bp = netdev_priv(dev);
4802
4803 rtnl_lock();
4804
4805 pci_save_state(pdev);
4806
4807 if (!netif_running(dev)) {
4808 rtnl_unlock();
4809 return 0;
4810 }
4811
4812 netif_device_detach(dev);
4813
5d07d868 4814 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4815
4816 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4817
4818 rtnl_unlock();
4819
4820 return 0;
4821}
4822
4823int bnx2x_resume(struct pci_dev *pdev)
4824{
4825 struct net_device *dev = pci_get_drvdata(pdev);
4826 struct bnx2x *bp;
4827 int rc;
4828
4829 if (!dev) {
4830 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4831 return -ENODEV;
4832 }
4833 bp = netdev_priv(dev);
4834
4835 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4836 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4837 return -EAGAIN;
4838 }
4839
4840 rtnl_lock();
4841
4842 pci_restore_state(pdev);
4843
4844 if (!netif_running(dev)) {
4845 rtnl_unlock();
4846 return 0;
4847 }
4848
4849 bnx2x_set_power_state(bp, PCI_D0);
4850 netif_device_attach(dev);
4851
4852 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4853
4854 rtnl_unlock();
4855
4856 return rc;
4857}
619c5cb6 4858
619c5cb6
VZ
4859void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4860 u32 cid)
4861{
b9871bcf
AE
4862 if (!cxt) {
4863 BNX2X_ERR("bad context pointer %p\n", cxt);
4864 return;
4865 }
4866
619c5cb6
VZ
4867 /* ustorm cxt validation */
4868 cxt->ustorm_ag_context.cdu_usage =
4869 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4870 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4871 /* xcontext validation */
4872 cxt->xstorm_ag_context.cdu_reserved =
4873 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4874 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4875}
4876
1191cb83
ED
4877static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4878 u8 fw_sb_id, u8 sb_index,
4879 u8 ticks)
619c5cb6 4880{
619c5cb6
VZ
4881 u32 addr = BAR_CSTRORM_INTMEM +
4882 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4883 REG_WR8(bp, addr, ticks);
51c1a580
MS
4884 DP(NETIF_MSG_IFUP,
4885 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4886 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4887}
4888
1191cb83
ED
4889static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4890 u16 fw_sb_id, u8 sb_index,
4891 u8 disable)
619c5cb6
VZ
4892{
4893 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4894 u32 addr = BAR_CSTRORM_INTMEM +
4895 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4896 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4897 /* clear and set */
4898 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4899 flags |= enable_flag;
0c14e5ce 4900 REG_WR8(bp, addr, flags);
51c1a580
MS
4901 DP(NETIF_MSG_IFUP,
4902 "port %x fw_sb_id %d sb_index %d disable %d\n",
4903 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4904}
4905
4906void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4907 u8 sb_index, u8 disable, u16 usec)
4908{
4909 int port = BP_PORT(bp);
4910 u8 ticks = usec / BNX2X_BTR;
4911
4912 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4913
4914 disable = disable ? 1 : (usec ? 0 : 1);
4915 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4916}
230bb0f3
YM
4917
4918void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4919 u32 verbose)
4920{
4921 smp_mb__before_clear_bit();
4922 set_bit(flag, &bp->sp_rtnl_state);
4923 smp_mb__after_clear_bit();
4924 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4925 flag);
4926 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4927}
4928EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);