1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
9969085e 24#include <net/tcp.h>
f2e0899f 25#include <net/ipv6.h>
7f3e01fe 26#include <net/ip6_checksum.h>
076bb0c8 27#include <net/busy_poll.h>
c0cba59e 28#include <linux/prefetch.h>
9f6c9258 29#include "bnx2x_cmn.h"
523224a3 30#include "bnx2x_init.h"
042181f5 31#include "bnx2x_sp.h"
9f6c9258 32
a8f47eb7 33static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36static int bnx2x_poll(struct napi_struct *napi, int budget);
37
38static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
39{
40 int i;
41
42 /* Add NAPI objects */
43 for_each_rx_queue_cnic(bp, i) {
44 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 bnx2x_poll, NAPI_POLL_WEIGHT);
46 napi_hash_add(&bnx2x_fp(bp, i, napi));
47 }
48}
49
50static void bnx2x_add_all_napi(struct bnx2x *bp)
51{
52 int i;
53
54 /* Add NAPI objects */
55 for_each_eth_queue(bp, i) {
56 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 bnx2x_poll, NAPI_POLL_WEIGHT);
58 napi_hash_add(&bnx2x_fp(bp, i, napi));
59 }
60}
61
62static int bnx2x_calc_num_queues(struct bnx2x *bp)
63{
64 return bnx2x_num_queues ?
65 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
66 min_t(int, netif_get_num_default_rss_queues(),
67 BNX2X_MAX_QUEUES(bp));
68}
69
70/**
71 * bnx2x_move_fp - move the contents of a fastpath structure.
72 *
73 * @bp: driver handle
74 * @from: source FP index
75 * @to: destination FP index
76 *
77 * Makes sure the contents of bp->fp[to].napi are kept
78 * intact. This is done by first copying the napi struct from
79 * the target to the source, and then memcpy'ing the entire
80 * source onto the target. Txdata pointers and related
81 * content are updated as well.
82 */
83static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
84{
85 struct bnx2x_fastpath *from_fp = &bp->fp[from];
86 struct bnx2x_fastpath *to_fp = &bp->fp[to];
87 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
88 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
89 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
90 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
91 int old_max_eth_txqs, new_max_eth_txqs;
92 int old_txdata_index = 0, new_txdata_index = 0;
34d5626a 93 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
94
95 /* Copy the NAPI object as it has been already initialized */
96 from_fp->napi = to_fp->napi;
97
98 /* Move bnx2x_fastpath contents */
99 memcpy(to_fp, from_fp, sizeof(*to_fp));
100 to_fp->index = to;
65565884 101
102 /* Retain the tpa_info of the original `to' version as we don't want
103 * 2 FPs to contain the same tpa_info pointer.
104 */
105 to_fp->tpa_info = old_tpa_info;
106
107 /* move sp_objs contents as well, as their indices match fp ones */
108 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
109
110 /* move fp_stats contents as well, as their indices match fp ones */
111 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
112
113 /* Update txdata pointers in fp and move txdata content accordingly:
114 * Each fp consumes 'max_cos' txdata structures, so the index should be
115 * decremented by max_cos x delta.
116 */
117
118 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
119 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
120 (bp)->max_cos;
121 if (from == FCOE_IDX(bp)) {
122 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
123 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
124 }
125
126 memcpy(&bp->bnx2x_txq[new_txdata_index],
127 &bp->bnx2x_txq[old_txdata_index],
128 sizeof(struct bnx2x_fp_txdata));
129 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
130}
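/* Standalone sketch, not driver code: bnx2x_move_fp() above keeps the
 * destination's napi (and tpa_info) intact across a whole-struct copy by
 * saving and restoring them around the memcpy. The types and values below
 * are made up purely to illustrate that pattern.
 */
#include <stdio.h>
#include <string.h>

struct fp_like {
	int index;
	void *napi;		/* must survive the move */
	int payload[4];		/* everything else is overwritten */
};

static void move_fp_like(struct fp_like *from, struct fp_like *to, int to_idx)
{
	void *saved_napi = to->napi;	/* keep the already-initialized member */

	memcpy(to, from, sizeof(*to));	/* bulk-copy the rest */
	to->index = to_idx;
	to->napi = saved_napi;		/* restore the preserved member */
}

int main(void)
{
	struct fp_like from = { .index = 5, .napi = NULL, .payload = { 1, 2, 3, 4 } };
	struct fp_like to = { .index = 2, .napi = (void *)&from };

	move_fp_like(&from, &to, 2);
	printf("index=%d payload[0]=%d napi preserved=%d\n",
	       to.index, to.payload[0], to.napi == (void *)&from);
	return 0;
}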
131
132/**
133 * bnx2x_fill_fw_str - Fill buffer with FW version string.
134 *
135 * @bp: driver handle
136 * @buf: character buffer to fill with the fw name
137 * @buf_len: length of the above buffer
138 *
139 */
140void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
141{
142 if (IS_PF(bp)) {
143 u8 phy_fw_ver[PHY_FW_VER_LEN];
144
145 phy_fw_ver[0] = '\0';
146 bnx2x_get_ext_phy_fw_version(&bp->link_params,
147 phy_fw_ver, PHY_FW_VER_LEN);
148 strlcpy(buf, bp->fw_ver, buf_len);
149 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
150 "bc %d.%d.%d%s%s",
151 (bp->common.bc_ver & 0xff0000) >> 16,
152 (bp->common.bc_ver & 0xff00) >> 8,
153 (bp->common.bc_ver & 0xff),
154 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
155 } else {
6411280a 156 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
157 }
158}
159
160/**
161 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
162 *
163 * @bp: driver handle
164 * @delta: number of eth queues which were not allocated
165 */
166static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
167{
168 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
169
170 /* Queue pointer cannot be re-set on an fp basis, as moving the pointer
171 * backward along the array could cause memory to be overwritten
172 */
173 for (cos = 1; cos < bp->max_cos; cos++) {
174 for (i = 0; i < old_eth_num - delta; i++) {
175 struct bnx2x_fastpath *fp = &bp->fp[i];
176 int new_idx = cos * (old_eth_num - delta) + i;
177
178 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
179 sizeof(struct bnx2x_fp_txdata));
180 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
181 }
182 }
183}
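/* Standalone sketch, not driver code: bnx2x_shrink_eth_fp() above compacts
 * the flat [cos][queue] txdata array after some queues were not allocated,
 * moving each cos row to its new, smaller stride. The array sizes here are
 * assumptions chosen just to show the index math; walking cos and i upward
 * guarantees no still-unread source entry is overwritten.
 */
#include <stdio.h>

#define MAX_COS_SKETCH		3
#define OLD_QUEUES_SKETCH	4

int main(void)
{
	int txq[MAX_COS_SKETCH * OLD_QUEUES_SKETCH];
	int delta = 1;					/* one queue was not allocated */
	int new_queues = OLD_QUEUES_SKETCH - delta;
	int cos, i;

	for (i = 0; i < MAX_COS_SKETCH * OLD_QUEUES_SKETCH; i++)
		txq[i] = i;				/* pretend payload: old flat index */

	for (cos = 1; cos < MAX_COS_SKETCH; cos++)
		for (i = 0; i < new_queues; i++)
			txq[cos * new_queues + i] = txq[cos * OLD_QUEUES_SKETCH + i];

	for (cos = 0; cos < MAX_COS_SKETCH; cos++)
		for (i = 0; i < new_queues; i++)
			printf("cos %d q %d -> old entry %d\n",
			       cos, i, txq[cos * new_queues + i]);
	return 0;
}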
184
a8f47eb7 185int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
619c5cb6 186
9f6c9258
DK
187/* free skb in the packet ring at pos idx
188 * return idx of last bd freed
189 */
6383c0b3 190static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
191 u16 idx, unsigned int *pkts_compl,
192 unsigned int *bytes_compl)
9f6c9258 193{
6383c0b3 194 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
195 struct eth_tx_start_bd *tx_start_bd;
196 struct eth_tx_bd *tx_data_bd;
197 struct sk_buff *skb = tx_buf->skb;
198 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
199 int nbd;
95e92fd4 200 u16 split_bd_len = 0;
9f6c9258
DK
201
202 /* prefetch skb end pointer to speedup dev_kfree_skb() */
203 prefetch(&skb->end);
204
51c1a580 205 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 206 txdata->txq_index, idx, tx_buf, skb);
9f6c9258 207
6383c0b3 208 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258
DK
209
210 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
211#ifdef BNX2X_STOP_ON_ERROR
212 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
213 BNX2X_ERR("BAD nbd!\n");
214 bnx2x_panic();
215 }
216#endif
217 new_cons = nbd + tx_buf->first_bd;
218
219 /* Get the next bd */
220 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
221
222 /* Skip a parse bd... */
223 --nbd;
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
225
95e92fd4 226 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
9f6c9258 227 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
95e92fd4
MS
228 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
229 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
9f6c9258
DK
230 --nbd;
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
232 }
233
95e92fd4
MS
234 /* unmap first bd */
235 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
236 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
237 DMA_TO_DEVICE);
238
9f6c9258
DK
239 /* now free frags */
240 while (nbd > 0) {
241
6383c0b3 242 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
243 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
244 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
245 if (--nbd)
246 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
247 }
248
249 /* release skb */
250 WARN_ON(!skb);
d8290ae5 251 if (likely(skb)) {
2df1a70a
TH
252 (*pkts_compl)++;
253 (*bytes_compl) += skb->len;
254 }
d8290ae5 255
40955532 256 dev_kfree_skb_any(skb);
9f6c9258
DK
257 tx_buf->first_bd = 0;
258 tx_buf->skb = NULL;
259
260 return new_cons;
261}
262
6383c0b3 263int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 264{
9f6c9258 265 struct netdev_queue *txq;
6383c0b3 266 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 267 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
268
269#ifdef BNX2X_STOP_ON_ERROR
270 if (unlikely(bp->panic))
271 return -1;
272#endif
273
6383c0b3
AE
274 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
275 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
276 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
277
278 while (sw_cons != hw_cons) {
279 u16 pkt_cons;
280
281 pkt_cons = TX_BD(sw_cons);
282
51c1a580
MS
283 DP(NETIF_MSG_TX_DONE,
284 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 285 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 286
2df1a70a 287 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 288 &pkts_compl, &bytes_compl);
2df1a70a 289
9f6c9258
DK
290 sw_cons++;
291 }
292
2df1a70a
TH
293 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
294
6383c0b3
AE
295 txdata->tx_pkt_cons = sw_cons;
296 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
297
298 /* Need to make the tx_bd_cons update visible to start_xmit()
299 * before checking for netif_tx_queue_stopped(). Without the
300 * memory barrier, there is a small possibility that
301 * start_xmit() will miss it and cause the queue to be stopped
302 * forever.
619c5cb6
VZ
303 * On the other hand we need an rmb() here to ensure the proper
304 * ordering of bit testing in the following
305 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
306 */
307 smp_mb();
308
9f6c9258 309 if (unlikely(netif_tx_queue_stopped(txq))) {
310 /* Taking tx_lock() is needed to prevent re-enabling the queue
311 * while it's empty. This could happen if rx_action() gets
312 * suspended in bnx2x_tx_int() after the condition before
313 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
314 *
315 * stops the queue -> sees fresh tx_bd_cons -> releases the queue ->
316 * sends some packets consuming the whole queue again ->
317 * stops the queue
318 */
319
320 __netif_tx_lock(txq, smp_processor_id());
321
322 if ((netif_tx_queue_stopped(txq)) &&
323 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 324 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
325 netif_tx_wake_queue(txq);
326
327 __netif_tx_unlock(txq);
328 }
329 return 0;
330}
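/* Standalone analogy, not the kernel's smp_mb(): the comment in
 * bnx2x_tx_int() above describes a lost-wakeup race. Using C11 atomics and
 * fences (an assumption made only for this sketch), the completion side
 * publishes the new consumer index before testing the "stopped" flag, and
 * the transmit side sets "stopped" before re-reading the consumer index, so
 * at least one side always observes the other's update.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint cons;	/* consumer index, updated on completion */
static atomic_bool stopped;	/* "queue stopped" flag */

static void completion_side(unsigned int new_cons)
{
	atomic_store_explicit(&cons, new_cons, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence below */
	if (atomic_load_explicit(&stopped, memory_order_relaxed))
		printf("wake queue\n");
}

static void transmit_side(unsigned int ring_size, unsigned int prod)
{
	atomic_store_explicit(&stopped, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	if (ring_size - (prod - atomic_load_explicit(&cons, memory_order_relaxed)) > 0)
		printf("room appeared, wake queue again\n");
}

int main(void)
{
	completion_side(10);
	transmit_side(64, 12);
	return 0;
}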
331
332static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
333 u16 idx)
334{
335 u16 last_max = fp->last_max_sge;
336
337 if (SUB_S16(idx, last_max) > 0)
338 fp->last_max_sge = idx;
339}
340
621b4d66
DK
341static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
342 u16 sge_len,
343 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
344{
345 struct bnx2x *bp = fp->bp;
9f6c9258
DK
346 u16 last_max, last_elem, first_elem;
347 u16 delta = 0;
348 u16 i;
349
350 if (!sge_len)
351 return;
352
353 /* First mark all used pages */
354 for (i = 0; i < sge_len; i++)
619c5cb6 355 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 356 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
357
358 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 359 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
360
361 /* Here we assume that the last SGE index is the biggest */
362 prefetch((void *)(fp->sge_mask));
523224a3 363 bnx2x_update_last_max_sge(fp,
621b4d66 364 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
365
366 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
367 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
368 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
369
370 /* If ring is not full */
371 if (last_elem + 1 != first_elem)
372 last_elem++;
373
374 /* Now update the prod */
375 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
376 if (likely(fp->sge_mask[i]))
377 break;
378
619c5cb6
VZ
379 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
380 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
381 }
382
383 if (delta > 0) {
384 fp->rx_sge_prod += delta;
385 /* clear page-end entries */
386 bnx2x_clear_sge_mask_next_elems(fp);
387 }
388
389 DP(NETIF_MSG_RX_STATUS,
390 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
391 fp->last_max_sge, fp->rx_sge_prod);
392}
393
2de67439 394/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
395 * CQE (calculated by HW).
396 */
397static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb 398 const struct eth_fast_path_rx_cqe *cqe,
5495ab75 399 enum pkt_hash_types *rxhash_type)
e52fcb24 400{
2de67439 401 /* Get Toeplitz hash from CQE */
e52fcb24 402 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
403 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
404 enum eth_rss_hash_type htype;
405
406 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
5495ab75
TH
407 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
408 (htype == TCP_IPV6_HASH_TYPE)) ?
409 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
410
e52fcb24 411 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb 412 }
5495ab75 413 *rxhash_type = PKT_HASH_TYPE_NONE;
e52fcb24
ED
414 return 0;
415}
416
9f6c9258 417static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 418 u16 cons, u16 prod,
619c5cb6 419 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
420{
421 struct bnx2x *bp = fp->bp;
422 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
423 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
424 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
425 dma_addr_t mapping;
619c5cb6
VZ
426 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
427 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 428
619c5cb6
VZ
429 /* print error if current state != stop */
430 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
431 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
432
e52fcb24 433 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 434 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 435 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
436 fp->rx_buf_size, DMA_FROM_DEVICE);
437 /*
438 * ...if it fails - move the skb from the consumer to the producer
439 * and set the current aggregation state as ERROR to drop it
440 * when TPA_STOP arrives.
441 */
442
443 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
444 /* Move the BD from the consumer to the producer */
e52fcb24 445 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
446 tpa_info->tpa_state = BNX2X_TPA_ERROR;
447 return;
448 }
9f6c9258 449
e52fcb24
ED
450 /* move empty data from pool to prod */
451 prod_rx_buf->data = first_buf->data;
619c5cb6 452 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 453 /* point prod_bd to new data */
9f6c9258
DK
454 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
455 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
456
619c5cb6
VZ
457 /* move partial skb from cons to pool (don't unmap yet) */
458 *first_buf = *cons_rx_buf;
459
460 /* mark bin state as START */
461 tpa_info->parsing_flags =
462 le16_to_cpu(cqe->pars_flags.flags);
463 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
464 tpa_info->tpa_state = BNX2X_TPA_START;
465 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
466 tpa_info->placement_offset = cqe->placement_offset;
5495ab75 467 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
621b4d66
DK
468 if (fp->mode == TPA_MODE_GRO) {
469 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 470 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
471 tpa_info->gro_size = gro_size;
472 }
619c5cb6 473
9f6c9258
DK
474#ifdef BNX2X_STOP_ON_ERROR
475 fp->tpa_queue_used |= (1 << queue);
476#ifdef _ASM_GENERIC_INT_L64_H
477 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
478#else
479 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
480#endif
481 fp->tpa_queue_used);
482#endif
483}
484
e4e3c02a
VZ
485/* Timestamp option length allowed for TPA aggregation:
486 *
487 * nop nop kind length echo val
488 */
489#define TPA_TSTAMP_OPT_LEN 12
490/**
cbf1de72 491 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 492 *
cbf1de72 493 * @skb: packet skb
e8920674
DK
494 * @parsing_flags: parsing flags from the START CQE
495 * @len_on_bd: total length of the first packet for the
496 * aggregation.
cbf1de72 497 * @pkt_len: length of all segments
e8920674
DK
498 *
499 * Approximate value of the MSS for this aggregation, calculated using
500 * its first packet.
501 * Compute the number of aggregated segments and the gso_type.
e4e3c02a 502 */
cbf1de72 503static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
504 u16 len_on_bd, unsigned int pkt_len,
505 u16 num_of_coalesced_segs)
e4e3c02a 506{
cbf1de72 507 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 508 * other than timestamp or IPv6 extension headers.
e4e3c02a 509 */
619c5cb6
VZ
510 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
511
512 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 513 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 514 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
515 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
516 } else {
619c5cb6 517 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
518 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
519 }
e4e3c02a
VZ
520
521 /* Check if there was a TCP timestamp; if there is, it will
522 * always be 12 bytes long: nop nop kind length echo val.
523 *
524 * Otherwise FW would close the aggregation.
525 */
526 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
527 hdrs_len += TPA_TSTAMP_OPT_LEN;
528
cbf1de72
YM
529 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
530
531 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
532 * to skb_shinfo(skb)->gso_segs
533 */
ab5777d7 534 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
535}
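/* Standalone worked example of the gso_size arithmetic in
 * bnx2x_set_gro_params() above, with the usual header sizes assumed
 * (14-byte Ethernet, 20-byte IPv4, 20-byte TCP) and the 12-byte timestamp
 * option present.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20, tstamp_opt = 12;
	unsigned int len_on_bd = 1514;	/* first packet of the aggregation */
	unsigned int hdrs_len = eth_hlen + ip_hlen + tcp_hlen + tstamp_opt;

	/* gso_size approximates the MSS of the aggregated flow: 1514 - 66 = 1448 */
	printf("gso_size = %u\n", len_on_bd - hdrs_len);
	return 0;
}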
536
996dedba
MS
537static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
538 u16 index, gfp_t gfp_mask)
1191cb83 539{
996dedba 540 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
1191cb83
ED
541 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
542 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
543 dma_addr_t mapping;
544
545 if (unlikely(page == NULL)) {
546 BNX2X_ERR("Can't alloc sge\n");
547 return -ENOMEM;
548 }
549
550 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 551 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
552 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
553 __free_pages(page, PAGES_PER_SGE_SHIFT);
554 BNX2X_ERR("Can't map sge\n");
555 return -ENOMEM;
556 }
557
558 sw_buf->page = page;
559 dma_unmap_addr_set(sw_buf, mapping, mapping);
560
561 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
562 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
563
564 return 0;
565}
566
9f6c9258 567static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
568 struct bnx2x_agg_info *tpa_info,
569 u16 pages,
570 struct sk_buff *skb,
619c5cb6
VZ
571 struct eth_end_agg_rx_cqe *cqe,
572 u16 cqe_idx)
9f6c9258
DK
573{
574 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
575 u32 i, frag_len, frag_size;
576 int err, j, frag_id = 0;
619c5cb6 577 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 578 u16 full_page = 0, gro_size = 0;
9f6c9258 579
619c5cb6 580 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
581
582 if (fp->mode == TPA_MODE_GRO) {
583 gro_size = tpa_info->gro_size;
584 full_page = tpa_info->full_page;
585 }
9f6c9258
DK
586
587 /* This is needed in order to enable forwarding support */
cbf1de72
YM
588 if (frag_size)
589 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
590 le16_to_cpu(cqe->pkt_len),
591 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 592
9f6c9258 593#ifdef BNX2X_STOP_ON_ERROR
924d75ab 594 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
595 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
596 pages, cqe_idx);
619c5cb6 597 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
598 bnx2x_panic();
599 return -EINVAL;
600 }
601#endif
602
603 /* Run through the SGL and compose the fragmented skb */
604 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 605 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
606
607 /* FW gives the indices of the SGE as if the ring is an array
608 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
609 if (fp->mode == TPA_MODE_GRO)
610 frag_len = min_t(u32, frag_size, (u32)full_page);
611 else /* LRO */
924d75ab 612 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 613
9f6c9258
DK
614 rx_pg = &fp->rx_page_ring[sge_idx];
615 old_rx_pg = *rx_pg;
616
617 /* If we fail to allocate a substitute page, we simply stop
618 where we are and drop the whole packet */
996dedba 619 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 620 if (unlikely(err)) {
15192a8c 621 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
622 return err;
623 }
624
16a5fd92 625 /* Unmap the page as we're going to pass it to the stack */
9f6c9258
DK
626 dma_unmap_page(&bp->pdev->dev,
627 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 628 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 629 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
630 if (fp->mode == TPA_MODE_LRO)
631 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
632 else { /* GRO */
633 int rem;
634 int offset = 0;
635 for (rem = frag_len; rem > 0; rem -= gro_size) {
636 int len = rem > gro_size ? gro_size : rem;
637 skb_fill_page_desc(skb, frag_id++,
638 old_rx_pg.page, offset, len);
639 if (offset)
640 get_page(old_rx_pg.page);
641 offset += len;
642 }
643 }
9f6c9258
DK
644
645 skb->data_len += frag_len;
924d75ab 646 skb->truesize += SGE_PAGES;
9f6c9258
DK
647 skb->len += frag_len;
648
649 frag_size -= frag_len;
650 }
651
652 return 0;
653}
654
d46d132c
ED
655static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
656{
657 if (fp->rx_frag_size)
658 put_page(virt_to_head_page(data));
659 else
660 kfree(data);
661}
662
996dedba 663static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 664{
996dedba
MS
665 if (fp->rx_frag_size) {
666 /* GFP_KERNEL allocations are used only during initialization */
667 if (unlikely(gfp_mask & __GFP_WAIT))
668 return (void *)__get_free_page(gfp_mask);
669
d46d132c 670 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 671 }
d46d132c 672
996dedba 673 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
674}
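/* Standalone sketch, not the kernel allocators: bnx2x_frag_alloc() and
 * bnx2x_frag_free() above dispatch on a precomputed rx_frag_size to pick
 * between the page-fragment pool and kmalloc. Plain malloc/free stand in
 * for those APIs here, and the sizes are sample values only.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_fp {
	size_t rx_frag_size;	/* non-zero: buffer fits in a page fragment */
	size_t rx_buf_size;
};

static void *frag_alloc(const struct fake_fp *fp)
{
	if (fp->rx_frag_size)
		return malloc(fp->rx_frag_size);	/* stands in for netdev_alloc_frag() */
	return malloc(fp->rx_buf_size);			/* stands in for kmalloc() */
}

static void frag_free(const struct fake_fp *fp, void *data)
{
	(void)fp;	/* the driver picks put_page() vs kfree() by the same test */
	free(data);
}

int main(void)
{
	struct fake_fp fp = { .rx_frag_size = 2048, .rx_buf_size = 9216 };
	void *buf = frag_alloc(&fp);

	printf("allocated %p via %s path\n", buf,
	       fp.rx_frag_size ? "frag" : "kmalloc");
	frag_free(&fp, buf);
	return 0;
}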
675
9969085e
YM
676#ifdef CONFIG_INET
677static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
678{
679 const struct iphdr *iph = ip_hdr(skb);
680 struct tcphdr *th;
681
682 skb_set_transport_header(skb, sizeof(struct iphdr));
683 th = tcp_hdr(skb);
684
685 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
686 iph->saddr, iph->daddr, 0);
687}
688
689static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
690{
691 struct ipv6hdr *iph = ipv6_hdr(skb);
692 struct tcphdr *th;
693
694 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
695 th = tcp_hdr(skb);
696
697 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
698 &iph->saddr, &iph->daddr, 0);
699}
2c2d06d5
YM
700
701static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
702 void (*gro_func)(struct bnx2x*, struct sk_buff*))
703{
704 skb_set_network_header(skb, 0);
705 gro_func(bp, skb);
706 tcp_gro_complete(skb);
707}
9969085e
YM
708#endif
709
710static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
711 struct sk_buff *skb)
712{
713#ifdef CONFIG_INET
cbf1de72 714 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
715 switch (be16_to_cpu(skb->protocol)) {
716 case ETH_P_IP:
2c2d06d5 717 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
718 break;
719 case ETH_P_IPV6:
2c2d06d5 720 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
721 break;
722 default:
2c2d06d5 723 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
724 be16_to_cpu(skb->protocol));
725 }
9969085e
YM
726 }
727#endif
60e66fee 728 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
729 napi_gro_receive(&fp->napi, skb);
730}
731
1191cb83
ED
732static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
733 struct bnx2x_agg_info *tpa_info,
734 u16 pages,
735 struct eth_end_agg_rx_cqe *cqe,
736 u16 cqe_idx)
9f6c9258 737{
619c5cb6 738 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 739 u8 pad = tpa_info->placement_offset;
619c5cb6 740 u16 len = tpa_info->len_on_bd;
e52fcb24 741 struct sk_buff *skb = NULL;
621b4d66 742 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
743 u8 old_tpa_state = tpa_info->tpa_state;
744
745 tpa_info->tpa_state = BNX2X_TPA_STOP;
746
747 /* If there was an error during the handling of the TPA_START -
748 * drop this aggregation.
749 */
750 if (old_tpa_state == BNX2X_TPA_ERROR)
751 goto drop;
752
e52fcb24 753 /* Try to allocate the new data */
996dedba 754 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
755 /* Unmap skb in the pool anyway, as we are going to change
756 pool entry status to BNX2X_TPA_STOP even if new skb allocation
757 fails. */
758 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 759 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 760 if (likely(new_data))
d46d132c 761 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 762
e52fcb24 763 if (likely(skb)) {
9f6c9258 764#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 765 if (pad + len > fp->rx_buf_size) {
51c1a580 766 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 767 pad, len, fp->rx_buf_size);
9f6c9258
DK
768 bnx2x_panic();
769 return;
770 }
771#endif
772
e52fcb24 773 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 774 skb_put(skb, len);
5495ab75 775 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
9f6c9258
DK
776
777 skb->protocol = eth_type_trans(skb, bp->dev);
778 skb->ip_summed = CHECKSUM_UNNECESSARY;
779
621b4d66
DK
780 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
781 skb, cqe, cqe_idx)) {
619c5cb6 782 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 783 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 784 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 785 } else {
51c1a580
MS
786 DP(NETIF_MSG_RX_STATUS,
787 "Failed to allocate new pages - dropping packet!\n");
40955532 788 dev_kfree_skb_any(skb);
9f6c9258
DK
789 }
790
e52fcb24
ED
791 /* put new data in bin */
792 rx_buf->data = new_data;
9f6c9258 793
619c5cb6 794 return;
9f6c9258 795 }
d46d132c 796 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
797drop:
798 /* drop the packet and keep the buffer in the bin */
799 DP(NETIF_MSG_RX_STATUS,
800 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 801 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
802}
803
996dedba
MS
804static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
805 u16 index, gfp_t gfp_mask)
1191cb83
ED
806{
807 u8 *data;
808 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
809 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
810 dma_addr_t mapping;
811
996dedba 812 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
813 if (unlikely(data == NULL))
814 return -ENOMEM;
815
816 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
817 fp->rx_buf_size,
818 DMA_FROM_DEVICE);
819 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 820 bnx2x_frag_free(fp, data);
1191cb83
ED
821 BNX2X_ERR("Can't map rx data\n");
822 return -ENOMEM;
823 }
824
825 rx_buf->data = data;
826 dma_unmap_addr_set(rx_buf, mapping, mapping);
827
828 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
829 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
830
831 return 0;
832}
833
15192a8c
BW
834static
835void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
836 struct bnx2x_fastpath *fp,
837 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 838{
e488921f
MS
839 /* Do nothing if no L4 csum validation was done.
840 * We do not check whether IP csum was validated. For IPv4 we assume
841 * that if the card got as far as validating the L4 csum, it also
842 * validated the IP csum. IPv6 has no IP csum.
843 */
d6cb3e41 844 if (cqe->fast_path_cqe.status_flags &
e488921f 845 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
846 return;
847
e488921f 848 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
849
850 if (cqe->fast_path_cqe.type_error_flags &
851 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
852 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 853 qstats->hw_csum_err++;
854 else
855 skb->ip_summed = CHECKSUM_UNNECESSARY;
856}
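/* Standalone sketch of the decision made in bnx2x_csum_validate() above.
 * The flag bit positions are invented for illustration; the real ones come
 * from the eth_fast_path_rx_cqe layout. Skip when no L4 validation was
 * done, count an error if the hardware flagged one, otherwise trust it.
 */
#include <stdio.h>

#define NO_L4_VALIDATION_SKETCH	0x1	/* hypothetical bit positions */
#define IP_BAD_XSUM_SKETCH	0x2
#define L4_BAD_XSUM_SKETCH	0x4

enum csum_result { CSUM_NONE, CSUM_UNNECESSARY, CSUM_HW_ERROR };

static enum csum_result csum_validate(unsigned int status, unsigned int errors)
{
	if (status & NO_L4_VALIDATION_SKETCH)
		return CSUM_NONE;				/* leave it to the stack */
	if (errors & (IP_BAD_XSUM_SKETCH | L4_BAD_XSUM_SKETCH))
		return CSUM_HW_ERROR;				/* qstats->hw_csum_err++ */
	return CSUM_UNNECESSARY;
}

int main(void)
{
	printf("%d %d %d\n",
	       csum_validate(NO_L4_VALIDATION_SKETCH, 0),
	       csum_validate(0, L4_BAD_XSUM_SKETCH),
	       csum_validate(0, 0));
	return 0;
}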
9f6c9258 857
a8f47eb7 858static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
9f6c9258
DK
859{
860 struct bnx2x *bp = fp->bp;
861 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 862 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 863 int rx_pkt = 0;
75b29459
DK
864 union eth_rx_cqe *cqe;
865 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
866
867#ifdef BNX2X_STOP_ON_ERROR
868 if (unlikely(bp->panic))
869 return 0;
870#endif
871
9f6c9258
DK
872 bd_cons = fp->rx_bd_cons;
873 bd_prod = fp->rx_bd_prod;
874 bd_prod_fw = bd_prod;
875 sw_comp_cons = fp->rx_comp_cons;
876 sw_comp_prod = fp->rx_comp_prod;
877
75b29459
DK
878 comp_ring_cons = RCQ_BD(sw_comp_cons);
879 cqe = &fp->rx_comp_ring[comp_ring_cons];
880 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
881
882 DP(NETIF_MSG_RX_STATUS,
75b29459 883 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 884
75b29459 885 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
886 struct sw_rx_bd *rx_buf = NULL;
887 struct sk_buff *skb;
9f6c9258 888 u8 cqe_fp_flags;
619c5cb6 889 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 890 u16 len, pad, queue;
e52fcb24 891 u8 *data;
bd5cef03 892 u32 rxhash;
5495ab75 893 enum pkt_hash_types rxhash_type;
9f6c9258 894
619c5cb6
VZ
895#ifdef BNX2X_STOP_ON_ERROR
896 if (unlikely(bp->panic))
897 return 0;
898#endif
899
9f6c9258
DK
900 bd_prod = RX_BD(bd_prod);
901 bd_cons = RX_BD(bd_cons);
902
619c5cb6
VZ
903 cqe_fp_flags = cqe_fp->type_error_flags;
904 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 905
51c1a580
MS
906 DP(NETIF_MSG_RX_STATUS,
907 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
908 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
909 cqe_fp_flags, cqe_fp->status_flags,
910 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
911 le16_to_cpu(cqe_fp->vlan_tag),
912 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
913
914 /* is this a slowpath msg? */
619c5cb6 915 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
916 bnx2x_sp_event(fp, cqe);
917 goto next_cqe;
e52fcb24 918 }
621b4d66 919
e52fcb24
ED
920 rx_buf = &fp->rx_buf_ring[bd_cons];
921 data = rx_buf->data;
9f6c9258 922
e52fcb24 923 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
924 struct bnx2x_agg_info *tpa_info;
925 u16 frag_size, pages;
619c5cb6 926#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
927 /* sanity check */
928 if (fp->disable_tpa &&
929 (CQE_TYPE_START(cqe_fp_type) ||
930 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 931 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 932 CQE_TYPE(cqe_fp_type));
619c5cb6 933#endif
9f6c9258 934
e52fcb24
ED
935 if (CQE_TYPE_START(cqe_fp_type)) {
936 u16 queue = cqe_fp->queue_index;
937 DP(NETIF_MSG_RX_STATUS,
938 "calling tpa_start on queue %d\n",
939 queue);
9f6c9258 940
e52fcb24
ED
941 bnx2x_tpa_start(fp, queue,
942 bd_cons, bd_prod,
943 cqe_fp);
621b4d66 944
e52fcb24 945 goto next_rx;
621b4d66
DK
946 }
947 queue = cqe->end_agg_cqe.queue_index;
948 tpa_info = &fp->tpa_info[queue];
949 DP(NETIF_MSG_RX_STATUS,
950 "calling tpa_stop on queue %d\n",
951 queue);
952
953 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
954 tpa_info->len_on_bd;
955
956 if (fp->mode == TPA_MODE_GRO)
957 pages = (frag_size + tpa_info->full_page - 1) /
958 tpa_info->full_page;
959 else
960 pages = SGE_PAGE_ALIGN(frag_size) >>
961 SGE_PAGE_SHIFT;
962
963 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
964 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 965#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
966 if (bp->panic)
967 return 0;
9f6c9258
DK
968#endif
969
621b4d66
DK
970 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
971 goto next_cqe;
e52fcb24
ED
972 }
973 /* non TPA */
621b4d66 974 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
975 pad = cqe_fp->placement_offset;
976 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 977 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
978 pad + RX_COPY_THRESH,
979 DMA_FROM_DEVICE);
980 pad += NET_SKB_PAD;
981 prefetch(data + pad); /* speedup eth_type_trans() */
982 /* is this an error packet? */
983 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 984 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
985 "ERROR flags %x rx packet %u\n",
986 cqe_fp_flags, sw_comp_cons);
15192a8c 987 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
988 goto reuse_rx;
989 }
9f6c9258 990
e52fcb24
ED
991 /* Since we don't have a jumbo ring
992 * copy small packets if mtu > 1500
993 */
994 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
995 (len <= RX_COPY_THRESH)) {
996 skb = netdev_alloc_skb_ip_align(bp->dev, len);
997 if (skb == NULL) {
51c1a580 998 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 999 "ERROR packet dropped because of alloc failure\n");
15192a8c 1000 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
1001 goto reuse_rx;
1002 }
e52fcb24
ED
1003 memcpy(skb->data, data + pad, len);
1004 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1005 } else {
996dedba
MS
1006 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1007 GFP_ATOMIC) == 0)) {
9f6c9258 1008 dma_unmap_single(&bp->pdev->dev,
e52fcb24 1009 dma_unmap_addr(rx_buf, mapping),
a8c94b91 1010 fp->rx_buf_size,
9f6c9258 1011 DMA_FROM_DEVICE);
d46d132c 1012 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 1013 if (unlikely(!skb)) {
d46d132c 1014 bnx2x_frag_free(fp, data);
15192a8c
BW
1015 bnx2x_fp_qstats(bp, fp)->
1016 rx_skb_alloc_failed++;
e52fcb24
ED
1017 goto next_rx;
1018 }
9f6c9258 1019 skb_reserve(skb, pad);
9f6c9258 1020 } else {
51c1a580
MS
1021 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1022 "ERROR packet dropped because of alloc failure\n");
15192a8c 1023 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 1024reuse_rx:
e52fcb24 1025 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
1026 goto next_rx;
1027 }
036d2df9 1028 }
9f6c9258 1029
036d2df9
DK
1030 skb_put(skb, len);
1031 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 1032
1033 /* Set Toeplitz hash for a non-LRO skb */
5495ab75
TH
1034 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1035 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 1036
036d2df9 1037 skb_checksum_none_assert(skb);
f85582f8 1038
d6cb3e41 1039 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
1040 bnx2x_csum_validate(skb, cqe, fp,
1041 bnx2x_fp_qstats(bp, fp));
9f6c9258 1042
f233cafe 1043 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1044
619c5cb6
VZ
1045 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1046 PARSING_FLAGS_VLAN)
86a9bad3 1047 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1048 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1049
8b80cda5 1050 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1051
1052 if (bnx2x_fp_ll_polling(fp))
1053 netif_receive_skb(skb);
1054 else
1055 napi_gro_receive(&fp->napi, skb);
9f6c9258 1056next_rx:
e52fcb24 1057 rx_buf->data = NULL;
9f6c9258
DK
1058
1059 bd_cons = NEXT_RX_IDX(bd_cons);
1060 bd_prod = NEXT_RX_IDX(bd_prod);
1061 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1062 rx_pkt++;
1063next_cqe:
1064 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1065 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1066
75b29459
DK
1067 /* mark CQE as free */
1068 BNX2X_SEED_CQE(cqe_fp);
1069
9f6c9258
DK
1070 if (rx_pkt == budget)
1071 break;
75b29459
DK
1072
1073 comp_ring_cons = RCQ_BD(sw_comp_cons);
1074 cqe = &fp->rx_comp_ring[comp_ring_cons];
1075 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1076 } /* while */
1077
1078 fp->rx_bd_cons = bd_cons;
1079 fp->rx_bd_prod = bd_prod_fw;
1080 fp->rx_comp_cons = sw_comp_cons;
1081 fp->rx_comp_prod = sw_comp_prod;
1082
1083 /* Update producers */
1084 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1085 fp->rx_sge_prod);
1086
1087 fp->rx_pkt += rx_pkt;
1088 fp->rx_calls++;
1089
1090 return rx_pkt;
1091}
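/* Standalone sketch of the copy-break decision used in bnx2x_rx_int()
 * above ("Since we don't have a jumbo ring, copy small packets if
 * mtu > 1500"). The threshold and MTU limit below are assumptions, not the
 * driver's constants: small frames are copied into a fresh buffer so the
 * original receive buffer can be reposted cheaply, large frames hand the
 * original buffer up instead.
 */
#include <stdbool.h>
#include <stdio.h>

#define RX_COPY_THRESH_SKETCH		192	/* hypothetical copy-break threshold */
#define ETH_MAX_PACKET_SIZE_SKETCH	1500

static bool should_copy_break(unsigned int mtu, unsigned int len)
{
	return mtu > ETH_MAX_PACKET_SIZE_SKETCH && len <= RX_COPY_THRESH_SKETCH;
}

int main(void)
{
	printf("mtu 9000, 64B frame   -> copy:  %d\n", should_copy_break(9000, 64));
	printf("mtu 9000, 1500B frame -> remap: %d\n", should_copy_break(9000, 1500));
	printf("mtu 1500, 64B frame   -> remap: %d\n", should_copy_break(1500, 64));
	return 0;
}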
1092
1093static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1094{
1095 struct bnx2x_fastpath *fp = fp_cookie;
1096 struct bnx2x *bp = fp->bp;
6383c0b3 1097 u8 cos;
9f6c9258 1098
51c1a580
MS
1099 DP(NETIF_MSG_INTR,
1100 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1101 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1102
523224a3 1103 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1104
1105#ifdef BNX2X_STOP_ON_ERROR
1106 if (unlikely(bp->panic))
1107 return IRQ_HANDLED;
1108#endif
1109
1110 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1111 for_each_cos_in_tx_queue(fp, cos)
65565884 1112 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1113
523224a3 1114 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1115 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1116
1117 return IRQ_HANDLED;
1118}
1119
9f6c9258
DK
1120/* HW Lock for shared dual port PHYs */
1121void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1122{
1123 mutex_lock(&bp->port.phy_mutex);
1124
8203c4b6 1125 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1126}
1127
1128void bnx2x_release_phy_lock(struct bnx2x *bp)
1129{
8203c4b6 1130 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1131
1132 mutex_unlock(&bp->port.phy_mutex);
1133}
1134
0793f83f
DK
1135/* calculates MF speed according to current linespeed and MF configuration */
1136u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1137{
1138 u16 line_speed = bp->link_vars.line_speed;
1139 if (IS_MF(bp)) {
faa6fcbb
DK
1140 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1141 bp->mf_config[BP_VN(bp)]);
1142
1143 /* Calculate the current MAX line speed limit for the MF
1144 * devices
0793f83f 1145 */
faa6fcbb
DK
1146 if (IS_MF_SI(bp))
1147 line_speed = (line_speed * maxCfg) / 100;
1148 else { /* SD mode */
0793f83f
DK
1149 u16 vn_max_rate = maxCfg * 100;
1150
1151 if (vn_max_rate < line_speed)
1152 line_speed = vn_max_rate;
faa6fcbb 1153 }
1154 }
1155
1156 return line_speed;
1157}
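/* Standalone worked example of bnx2x_get_mf_speed() above: in SI mode
 * maxCfg acts as a percentage of the physical line speed, in SD mode it is
 * a cap expressed in 100 Mbps units. The input values are sample numbers
 * chosen only for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;	/* 10 Gbps physical link, in Mbps */
	unsigned int max_cfg = 25;

	unsigned int si_speed = line_speed * max_cfg / 100;	/* 2500 Mbps */
	unsigned int sd_cap = max_cfg * 100;			/* 2500 Mbps */
	unsigned int sd_speed = sd_cap < line_speed ? sd_cap : line_speed;

	printf("SI: %u Mbps, SD: %u Mbps\n", si_speed, sd_speed);
	return 0;
}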
1158
2ae17f66
VZ
1159/**
1160 * bnx2x_fill_report_data - fill link report data to report
1161 *
1162 * @bp: driver handle
1163 * @data: link state to update
1164 *
1165 * It uses non-atomic bit operations because it is called under the mutex.
1166 */
1191cb83
ED
1167static void bnx2x_fill_report_data(struct bnx2x *bp,
1168 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1169{
1170 u16 line_speed = bnx2x_get_mf_speed(bp);
1171
1172 memset(data, 0, sizeof(*data));
1173
16a5fd92 1174 /* Fill the report data: effective line speed */
2ae17f66
VZ
1175 data->line_speed = line_speed;
1176
1177 /* Link is down */
1178 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1179 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1180 &data->link_report_flags);
1181
1182 /* Full DUPLEX */
1183 if (bp->link_vars.duplex == DUPLEX_FULL)
1184 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1185
1186 /* Rx Flow Control is ON */
1187 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1188 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1189
1190 /* Tx Flow Control is ON */
1191 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1192 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1193}
1194
1195/**
1196 * bnx2x_link_report - report link status to OS.
1197 *
1198 * @bp: driver handle
1199 *
1200 * Calls the __bnx2x_link_report() under the same locking scheme
1201 * as a link/PHY state managing code to ensure a consistent link
1202 * reporting.
1203 */
1204
9f6c9258
DK
1205void bnx2x_link_report(struct bnx2x *bp)
1206{
2ae17f66
VZ
1207 bnx2x_acquire_phy_lock(bp);
1208 __bnx2x_link_report(bp);
1209 bnx2x_release_phy_lock(bp);
1210}
9f6c9258 1211
2ae17f66
VZ
1212/**
1213 * __bnx2x_link_report - report link status to OS.
1214 *
1215 * @bp: driver handle
1216 *
1217 * Non-atomic implementation.
2ae17f66
VZ
1218 * Should be called under the phy_lock.
1219 */
1220void __bnx2x_link_report(struct bnx2x *bp)
1221{
1222 struct bnx2x_link_report_data cur_data;
9f6c9258 1223
2ae17f66 1224 /* reread mf_cfg */
ad5afc89 1225 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1226 bnx2x_read_mf_cfg(bp);
1227
1228 /* Read the current link report info */
1229 bnx2x_fill_report_data(bp, &cur_data);
1230
1231 /* Don't report link down or exactly the same link status twice */
1232 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1233 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1234 &bp->last_reported_link.link_report_flags) &&
1235 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1236 &cur_data.link_report_flags)))
1237 return;
1238
1239 bp->link_cnt++;
9f6c9258 1240
2ae17f66
VZ
1241 /* We are going to report new link parameters now -
1242 * remember the current data for the next time.
1243 */
1244 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1245
2ae17f66
VZ
1246 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1247 &cur_data.link_report_flags)) {
1248 netif_carrier_off(bp->dev);
1249 netdev_err(bp->dev, "NIC Link is Down\n");
1250 return;
1251 } else {
94f05b0f
JP
1252 const char *duplex;
1253 const char *flow;
1254
2ae17f66 1255 netif_carrier_on(bp->dev);
9f6c9258 1256
2ae17f66
VZ
1257 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1258 &cur_data.link_report_flags))
94f05b0f 1259 duplex = "full";
9f6c9258 1260 else
94f05b0f 1261 duplex = "half";
9f6c9258 1262
2ae17f66
VZ
1263 /* Handle the FC at the end so that only these flags can
1264 * possibly be set. This way we may easily check whether FC is
1265 * enabled at all.
1266 */
1267 if (cur_data.link_report_flags) {
1268 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1269 &cur_data.link_report_flags)) {
2ae17f66
VZ
1270 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1271 &cur_data.link_report_flags))
94f05b0f
JP
1272 flow = "ON - receive & transmit";
1273 else
1274 flow = "ON - receive";
9f6c9258 1275 } else {
94f05b0f 1276 flow = "ON - transmit";
9f6c9258 1277 }
94f05b0f
JP
1278 } else {
1279 flow = "none";
9f6c9258 1280 }
94f05b0f
JP
1281 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1282 cur_data.line_speed, duplex, flow);
1283 }
1284}
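/* Standalone sketch of the suppression rule in __bnx2x_link_report()
 * above, with a trimmed-down report structure assumed for illustration:
 * a report is skipped when it is identical to the previous one, or when
 * both the previous and the new report say the link is down.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct report_sketch {
	unsigned int line_speed;
	bool link_down;
};

static bool should_skip(const struct report_sketch *last,
			const struct report_sketch *cur)
{
	return !memcmp(last, cur, sizeof(*cur)) ||
	       (last->link_down && cur->link_down);
}

int main(void)
{
	struct report_sketch last = { .line_speed = 0, .link_down = true };
	struct report_sketch down = { .line_speed = 0, .link_down = true };
	struct report_sketch up = { .line_speed = 10000, .link_down = false };

	printf("down->down skipped: %d\n", should_skip(&last, &down));
	printf("down->up skipped:   %d\n", should_skip(&last, &up));
	return 0;
}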
1285
1191cb83
ED
1286static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1287{
1288 int i;
1289
1290 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1291 struct eth_rx_sge *sge;
1292
1293 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1294 sge->addr_hi =
1295 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1296 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1297
1298 sge->addr_lo =
1299 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1300 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1301 }
1302}
1303
1304static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1305 struct bnx2x_fastpath *fp, int last)
1306{
1307 int i;
1308
1309 for (i = 0; i < last; i++) {
1310 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1311 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1312 u8 *data = first_buf->data;
1313
1314 if (data == NULL) {
1315 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1316 continue;
1317 }
1318 if (tpa_info->tpa_state == BNX2X_TPA_START)
1319 dma_unmap_single(&bp->pdev->dev,
1320 dma_unmap_addr(first_buf, mapping),
1321 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1322 bnx2x_frag_free(fp, data);
1191cb83
ED
1323 first_buf->data = NULL;
1324 }
1325}
1326
55c11941
MS
1327void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1328{
1329 int j;
1330
1331 for_each_rx_queue_cnic(bp, j) {
1332 struct bnx2x_fastpath *fp = &bp->fp[j];
1333
1334 fp->rx_bd_cons = 0;
1335
1336 /* Activate BD ring */
1337 /* Warning!
1338 * This will generate an interrupt (to the TSTORM);
1339 * it must only be done after the chip is initialized.
1340 */
1341 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1342 fp->rx_sge_prod);
1343 }
1344}
1345
9f6c9258
DK
1346void bnx2x_init_rx_rings(struct bnx2x *bp)
1347{
1348 int func = BP_FUNC(bp);
523224a3 1349 u16 ring_prod;
9f6c9258 1350 int i, j;
25141580 1351
b3b83c3f 1352 /* Allocate TPA resources */
55c11941 1353 for_each_eth_queue(bp, j) {
523224a3 1354 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1355
a8c94b91
VZ
1356 DP(NETIF_MSG_IFUP,
1357 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1358
523224a3 1359 if (!fp->disable_tpa) {
16a5fd92 1360 /* Fill the per-aggregation pool */
dfacf138 1361 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1362 struct bnx2x_agg_info *tpa_info =
1363 &fp->tpa_info[i];
1364 struct sw_rx_bd *first_buf =
1365 &tpa_info->first_buf;
1366
996dedba
MS
1367 first_buf->data =
1368 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1369 if (!first_buf->data) {
51c1a580
MS
1370 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1371 j);
9f6c9258
DK
1372 bnx2x_free_tpa_pool(bp, fp, i);
1373 fp->disable_tpa = 1;
1374 break;
1375 }
619c5cb6
VZ
1376 dma_unmap_addr_set(first_buf, mapping, 0);
1377 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1378 }
523224a3
DK
1379
1380 /* "next page" elements initialization */
1381 bnx2x_set_next_page_sgl(fp);
1382
1383 /* set SGEs bit mask */
1384 bnx2x_init_sge_ring_bit_mask(fp);
1385
1386 /* Allocate SGEs and initialize the ring elements */
1387 for (i = 0, ring_prod = 0;
1388 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1389
996dedba
MS
1390 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1391 GFP_KERNEL) < 0) {
51c1a580
MS
1392 BNX2X_ERR("was only able to allocate %d rx sges\n",
1393 i);
1394 BNX2X_ERR("disabling TPA for queue[%d]\n",
1395 j);
523224a3 1396 /* Cleanup already allocated elements */
619c5cb6
VZ
1397 bnx2x_free_rx_sge_range(bp, fp,
1398 ring_prod);
1399 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1400 MAX_AGG_QS(bp));
523224a3
DK
1401 fp->disable_tpa = 1;
1402 ring_prod = 0;
1403 break;
1404 }
1405 ring_prod = NEXT_SGE_IDX(ring_prod);
1406 }
1407
1408 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1409 }
1410 }
1411
55c11941 1412 for_each_eth_queue(bp, j) {
9f6c9258
DK
1413 struct bnx2x_fastpath *fp = &bp->fp[j];
1414
1415 fp->rx_bd_cons = 0;
9f6c9258 1416
b3b83c3f
DK
1417 /* Activate BD ring */
1418 /* Warning!
1419 * This will generate an interrupt (to the TSTORM);
1420 * it must only be done after the chip is initialized.
1421 */
1422 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1423 fp->rx_sge_prod);
9f6c9258 1424
9f6c9258
DK
1425 if (j != 0)
1426 continue;
1427
619c5cb6 1428 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1429 REG_WR(bp, BAR_USTRORM_INTMEM +
1430 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1431 U64_LO(fp->rx_comp_mapping));
1432 REG_WR(bp, BAR_USTRORM_INTMEM +
1433 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1434 U64_HI(fp->rx_comp_mapping));
1435 }
9f6c9258
DK
1436 }
1437}
f85582f8 1438
55c11941 1439static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1440{
6383c0b3 1441 u8 cos;
55c11941 1442 struct bnx2x *bp = fp->bp;
9f6c9258 1443
55c11941
MS
1444 for_each_cos_in_tx_queue(fp, cos) {
1445 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1446 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1447
55c11941
MS
1448 u16 sw_prod = txdata->tx_pkt_prod;
1449 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1450
55c11941
MS
1451 while (sw_cons != sw_prod) {
1452 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1453 &pkts_compl, &bytes_compl);
1454 sw_cons++;
9f6c9258 1455 }
55c11941
MS
1456
1457 netdev_tx_reset_queue(
1458 netdev_get_tx_queue(bp->dev,
1459 txdata->txq_index));
1460 }
1461}
1462
1463static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1464{
1465 int i;
1466
1467 for_each_tx_queue_cnic(bp, i) {
1468 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1469 }
1470}
1471
1472static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1473{
1474 int i;
1475
1476 for_each_eth_queue(bp, i) {
1477 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1478 }
1479}
1480
b3b83c3f
DK
1481static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1482{
1483 struct bnx2x *bp = fp->bp;
1484 int i;
1485
1486 /* ring wasn't allocated */
1487 if (fp->rx_buf_ring == NULL)
1488 return;
1489
1490 for (i = 0; i < NUM_RX_BD; i++) {
1491 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1492 u8 *data = rx_buf->data;
b3b83c3f 1493
e52fcb24 1494 if (data == NULL)
b3b83c3f 1495 continue;
b3b83c3f
DK
1496 dma_unmap_single(&bp->pdev->dev,
1497 dma_unmap_addr(rx_buf, mapping),
1498 fp->rx_buf_size, DMA_FROM_DEVICE);
1499
e52fcb24 1500 rx_buf->data = NULL;
d46d132c 1501 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1502 }
1503}
1504
55c11941
MS
1505static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1506{
1507 int j;
1508
1509 for_each_rx_queue_cnic(bp, j) {
1510 bnx2x_free_rx_bds(&bp->fp[j]);
1511 }
1512}
1513
9f6c9258
DK
1514static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1515{
b3b83c3f 1516 int j;
9f6c9258 1517
55c11941 1518 for_each_eth_queue(bp, j) {
9f6c9258
DK
1519 struct bnx2x_fastpath *fp = &bp->fp[j];
1520
b3b83c3f 1521 bnx2x_free_rx_bds(fp);
9f6c9258 1522
9f6c9258 1523 if (!fp->disable_tpa)
dfacf138 1524 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1525 }
1526}
1527
a8f47eb7 1528static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
55c11941
MS
1529{
1530 bnx2x_free_tx_skbs_cnic(bp);
1531 bnx2x_free_rx_skbs_cnic(bp);
1532}
1533
9f6c9258
DK
1534void bnx2x_free_skbs(struct bnx2x *bp)
1535{
1536 bnx2x_free_tx_skbs(bp);
1537 bnx2x_free_rx_skbs(bp);
1538}
1539
e3835b99
DK
1540void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1541{
1542 /* load old values */
1543 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1544
1545 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1546 /* leave all but MAX value */
1547 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1548
1549 /* set new MAX value */
1550 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1551 & FUNC_MF_CFG_MAX_BW_MASK;
1552
1553 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1554 }
1555}
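/* Standalone sketch of the read-modify-write in bnx2x_update_max_mf_config()
 * above, with made-up mask and shift values standing in for
 * FUNC_MF_CFG_MAX_BW_MASK / FUNC_MF_CFG_MAX_BW_SHIFT.
 */
#include <stdio.h>

#define MAX_BW_MASK_SKETCH	0xffff0000u	/* hypothetical field layout */
#define MAX_BW_SHIFT_SKETCH	16

int main(void)
{
	unsigned int mf_cfg = 0x12345678;
	unsigned int value = 50;

	mf_cfg &= ~MAX_BW_MASK_SKETCH;				/* clear the old MAX value */
	mf_cfg |= (value << MAX_BW_SHIFT_SKETCH) & MAX_BW_MASK_SKETCH;	/* splice in the new one */
	printf("mf_cfg = 0x%08x\n", mf_cfg);			/* 0x00325678 */
	return 0;
}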
1556
ca92429f
DK
1557/**
1558 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1559 *
1560 * @bp: driver handle
1561 * @nvecs: number of vectors to be released
1562 */
1563static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1564{
ca92429f 1565 int i, offset = 0;
9f6c9258 1566
ca92429f
DK
1567 if (nvecs == offset)
1568 return;
ad5afc89
AE
1569
1570 /* VFs don't have a default SB */
1571 if (IS_PF(bp)) {
1572 free_irq(bp->msix_table[offset].vector, bp->dev);
1573 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1574 bp->msix_table[offset].vector);
1575 offset++;
1576 }
55c11941
MS
1577
1578 if (CNIC_SUPPORT(bp)) {
1579 if (nvecs == offset)
1580 return;
1581 offset++;
1582 }
ca92429f 1583
ec6ba945 1584 for_each_eth_queue(bp, i) {
ca92429f
DK
1585 if (nvecs == offset)
1586 return;
51c1a580
MS
1587 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1588 i, bp->msix_table[offset].vector);
9f6c9258 1589
ca92429f 1590 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1591 }
1592}
1593
d6214d7a 1594void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1595{
30a5de77 1596 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1597 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1598 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1599
1600 /* vfs don't have a default status block */
1601 if (IS_PF(bp))
1602 nvecs++;
1603
1604 bnx2x_free_msix_irqs(bp, nvecs);
1605 } else {
30a5de77 1606 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1607 }
9f6c9258
DK
1608}
1609
0e8d2ec5 1610int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1611{
1ab4434c 1612 int msix_vec = 0, i, rc;
9f6c9258 1613
1ab4434c
AE
1614 /* VFs don't have a default status block */
1615 if (IS_PF(bp)) {
1616 bp->msix_table[msix_vec].entry = msix_vec;
1617 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1618 bp->msix_table[0].entry);
1619 msix_vec++;
1620 }
9f6c9258 1621
55c11941
MS
1622 /* Cnic requires an msix vector for itself */
1623 if (CNIC_SUPPORT(bp)) {
1624 bp->msix_table[msix_vec].entry = msix_vec;
1625 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1626 msix_vec, bp->msix_table[msix_vec].entry);
1627 msix_vec++;
1628 }
1629
6383c0b3 1630 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1631 for_each_eth_queue(bp, i) {
d6214d7a 1632 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1633 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1634 msix_vec, msix_vec, i);
d6214d7a 1635 msix_vec++;
9f6c9258
DK
1636 }
1637
1ab4434c
AE
1638 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1639 msix_vec);
d6214d7a 1640
1ab4434c 1641 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1642
1643 /*
1644 * reconfigure number of tx/rx queues according to available
1645 * MSI-X vectors
1646 */
55c11941 1647 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1648 /* how many fewer vectors will we have? */
1ab4434c 1649 int diff = msix_vec - rc;
9f6c9258 1650
51c1a580 1651 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1652
1653 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1654
1655 if (rc) {
30a5de77
DK
1656 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1657 goto no_msix;
9f6c9258 1658 }
d6214d7a
DK
1659 /*
1660 * decrease number of queues by number of unallocated entries
1661 */
55c11941
MS
1662 bp->num_ethernet_queues -= diff;
1663 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1664
51c1a580 1665 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1666 bp->num_queues);
1667 } else if (rc > 0) {
1668 /* Get by with single vector */
1669 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1670 if (rc) {
1671 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1672 rc);
1673 goto no_msix;
1674 }
1675
1676 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1677 bp->flags |= USING_SINGLE_MSIX_FLAG;
1678
55c11941
MS
1679 BNX2X_DEV_INFO("set number of queues to 1\n");
1680 bp->num_ethernet_queues = 1;
1681 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1682 } else if (rc < 0) {
51c1a580 1683 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1684 goto no_msix;
9f6c9258
DK
1685 }
1686
1687 bp->flags |= USING_MSIX_FLAG;
1688
1689 return 0;
30a5de77
DK
1690
1691no_msix:
1692 /* fall to INTx if not enough memory */
1693 if (rc == -ENOMEM)
1694 bp->flags |= DISABLE_MSI_FLAG;
1695
1696 return rc;
1697}
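/* Standalone sketch of the fallback ladder in bnx2x_enable_msix() above,
 * with no PCI calls: a stand-in models the old pci_enable_msix() return
 * convention (0 = success, >0 = vectors actually available, <0 = failure).
 * Ask for the full count, shrink the queue count to what is granted, fall
 * back to a single vector, and finally to MSI/INTx. All numbers are sample
 * inputs, not driver values.
 */
#include <stdio.h>

static int fake_enable_msix(int requested, int available)
{
	if (available >= requested)
		return 0;
	return available > 0 ? available : -1;
}

int main(void)
{
	int requested = 9, available = 4, queues = 8;
	int rc = fake_enable_msix(requested, available);

	if (rc == 0) {
		printf("full MSI-X: %d vectors, %d queues\n", requested, queues);
	} else if (rc >= 2) {		/* slowpath vector plus at least one queue */
		queues -= requested - rc;
		printf("reduced MSI-X: %d vectors, %d queues\n", rc, queues);
	} else if (rc > 0) {
		printf("single MSI-X vector, 1 queue\n");
	} else {
		printf("no MSI-X, falling back to MSI/INTx\n");
	}
	return 0;
}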
1698
1699static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1700{
ca92429f 1701 int i, rc, offset = 0;
9f6c9258 1702
ad5afc89
AE
1703 /* no default status block for vf */
1704 if (IS_PF(bp)) {
1705 rc = request_irq(bp->msix_table[offset++].vector,
1706 bnx2x_msix_sp_int, 0,
1707 bp->dev->name, bp->dev);
1708 if (rc) {
1709 BNX2X_ERR("request sp irq failed\n");
1710 return -EBUSY;
1711 }
9f6c9258
DK
1712 }
1713
55c11941
MS
1714 if (CNIC_SUPPORT(bp))
1715 offset++;
1716
ec6ba945 1717 for_each_eth_queue(bp, i) {
9f6c9258
DK
1718 struct bnx2x_fastpath *fp = &bp->fp[i];
1719 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1720 bp->dev->name, i);
1721
d6214d7a 1722 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1723 bnx2x_msix_fp_int, 0, fp->name, fp);
1724 if (rc) {
ca92429f
DK
1725 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1726 bp->msix_table[offset].vector, rc);
1727 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1728 return -EBUSY;
1729 }
1730
d6214d7a 1731 offset++;
9f6c9258
DK
1732 }
1733
ec6ba945 1734 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1735 if (IS_PF(bp)) {
1736 offset = 1 + CNIC_SUPPORT(bp);
1737 netdev_info(bp->dev,
1738 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1739 bp->msix_table[0].vector,
1740 0, bp->msix_table[offset].vector,
1741 i - 1, bp->msix_table[offset + i - 1].vector);
1742 } else {
1743 offset = CNIC_SUPPORT(bp);
1744 netdev_info(bp->dev,
1745 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1746 0, bp->msix_table[offset].vector,
1747 i - 1, bp->msix_table[offset + i - 1].vector);
1748 }
9f6c9258
DK
1749 return 0;
1750}
1751
d6214d7a 1752int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1753{
1754 int rc;
1755
1756 rc = pci_enable_msi(bp->pdev);
1757 if (rc) {
51c1a580 1758 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1759 return -1;
1760 }
1761 bp->flags |= USING_MSI_FLAG;
1762
1763 return 0;
1764}
1765
1766static int bnx2x_req_irq(struct bnx2x *bp)
1767{
1768 unsigned long flags;
30a5de77 1769 unsigned int irq;
9f6c9258 1770
1771 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1772 		flags = 0;
1773 	else
1774 		flags = IRQF_SHARED;
1775
30a5de77
DK
1776 if (bp->flags & USING_MSIX_FLAG)
1777 irq = bp->msix_table[0].vector;
1778 else
1779 irq = bp->pdev->irq;
1780
1781 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1782}
1783
c957d09f 1784static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1785{
1786 int rc = 0;
1787 	if (bp->flags & USING_MSIX_FLAG &&
1788 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1789 		rc = bnx2x_req_msix_irqs(bp);
1790 		if (rc)
1791 			return rc;
1792 	} else {
1793 		rc = bnx2x_req_irq(bp);
1794 if (rc) {
1795 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1796 return rc;
1797 }
1798 if (bp->flags & USING_MSI_FLAG) {
1799 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1800 netdev_info(bp->dev, "using MSI IRQ %d\n",
1801 bp->dev->irq);
1802 }
1803 if (bp->flags & USING_MSIX_FLAG) {
1804 bp->dev->irq = bp->msix_table[0].vector;
1805 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1806 bp->dev->irq);
619c5cb6
VZ
1807 }
1808 }
1809
1810 return 0;
1811}
1812
55c11941
MS
1813static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1814{
1815 int i;
1816
8f20aa57
DK
1817 for_each_rx_queue_cnic(bp, i) {
1818 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1819 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1820 }
55c11941
MS
1821}
1822
1191cb83 1823static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1824{
1825 int i;
1826
8f20aa57
DK
1827 for_each_eth_queue(bp, i) {
1828 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1829 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1830 }
9f6c9258
DK
1831}
1832
55c11941
MS
1833static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1834{
1835 int i;
1836
8f20aa57 1837 for_each_rx_queue_cnic(bp, i) {
55c11941 1838 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1839 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1840 usleep_range(1000, 2000);
8f20aa57 1841 }
55c11941
MS
1842}
1843
1191cb83 1844static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1845{
1846 int i;
1847
8f20aa57 1848 for_each_eth_queue(bp, i) {
9f6c9258 1849 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1850 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1851 usleep_range(1000, 2000);
8f20aa57 1852 }
9f6c9258
DK
1853}
1854
1855void bnx2x_netif_start(struct bnx2x *bp)
1856{
4b7ed897
DK
1857 if (netif_running(bp->dev)) {
1858 bnx2x_napi_enable(bp);
55c11941
MS
1859 if (CNIC_LOADED(bp))
1860 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1861 bnx2x_int_enable(bp);
1862 if (bp->state == BNX2X_STATE_OPEN)
1863 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1864 }
1865}
1866
1867void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1868{
1869 bnx2x_int_disable_sync(bp, disable_hw);
1870 bnx2x_napi_disable(bp);
55c11941
MS
1871 if (CNIC_LOADED(bp))
1872 bnx2x_napi_disable_cnic(bp);
9f6c9258 1873}
9f6c9258 1874
f663dd9a 1875u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1876 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1877{
8307fa3e 1878 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1879
55c11941 1880 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1881 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1882 u16 ether_type = ntohs(hdr->h_proto);
1883
1884 /* Skip VLAN tag if present */
1885 if (ether_type == ETH_P_8021Q) {
1886 struct vlan_ethhdr *vhdr =
1887 (struct vlan_ethhdr *)skb->data;
1888
1889 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1890 }
1891
1892 /* If ethertype is FCoE or FIP - use FCoE ring */
1893 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1894 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1895 }
55c11941 1896
cdb9d6ae 1897 /* select a non-FCoE queue */
99932d4f 1898 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1899}
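/* Editorial note (worked example with assumed numbers): for a non-FCoE skb
 * the fallback above picks a queue within real_num_tx_queues and the modulo
 * folds it back onto the ETH queues. With, say, 8 ETH queues a fallback
 * result of 13 would map to Tx queue 13 % 8 = 5; FCoE/FIP frames bypass
 * this and always land on the dedicated FCoE ring.
 */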
1900
d6214d7a
DK
1901void bnx2x_set_num_queues(struct bnx2x *bp)
1902{
96305234 1903 /* RSS queues */
55c11941 1904 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1905
a3348722
BW
1906 /* override in STORAGE SD modes */
1907 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1908 		bp->num_ethernet_queues = 1;
1909
ec6ba945 1910 /* Add special queues */
55c11941
MS
1911 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1912 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1913
1914 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1915}
1916
cdb9d6ae
VZ
1917/**
1918 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1919 *
1920 * @bp: Driver handle
1921 *
1922 * We currently support at most 16 Tx queues for each CoS, thus we will
1923 * allocate a multiple of 16 for ETH L2 rings according to the value of
1924 * bp->max_cos.
1925 *
1926 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1927 * index after all ETH L2 indices.
1928 *
1929 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1930 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1931 * 16..31,...) with indices that are not coupled with any real Tx queue.
1932 *
1933 * The proper configuration of skb->queue_mapping is handled by
1934 * bnx2x_select_queue() and __skb_tx_hash().
1935 *
1936 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1937 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1938 */
55c11941 1939static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1940{
6383c0b3 1941 int rc, tx, rx;
ec6ba945 1942
65565884 1943 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1944 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1945
6383c0b3 1946/* account for fcoe queue */
55c11941
MS
1947 if (include_cnic && !NO_FCOE(bp)) {
1948 rx++;
1949 tx++;
6383c0b3 1950 }
6383c0b3
AE
1951
1952 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1953 if (rc) {
1954 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1955 return rc;
1956 }
1957 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1958 if (rc) {
1959 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1960 return rc;
1961 }
1962
51c1a580 1963 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1964 tx, rx);
1965
ec6ba945
VZ
1966 return rc;
1967}
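/* Editorial note (worked example with assumed numbers): with 8 ETH queues,
 * max_cos = 3 and an FCoE L2 ring included, the code above registers
 * tx = 8 * 3 + 1 = 25 and rx = 8 + 1 = 9 real queues; the FCoE Tx queue
 * takes the first index after all ETH L2 indices, as described in the
 * comment above the function.
 */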
1968
1191cb83 1969static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1970{
1971 int i;
1972
1973 for_each_queue(bp, i) {
1974 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1975 u32 mtu;
a8c94b91
VZ
1976
1977 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1978 if (IS_FCOE_IDX(i))
1979 			/*
1980 			 * Although no IP frames are expected to arrive on
1981 			 * this ring, we still want to add an
1982 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1983 			 * overrun attack.
1984 			 */
1985 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1986 		else
1987 			mtu = bp->dev->mtu;
1988 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1989 IP_HEADER_ALIGNMENT_PADDING +
1990 ETH_OVREHEAD +
1991 mtu +
1992 BNX2X_FW_RX_ALIGN_END;
1993 		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
1994 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1995 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1996 		else
1997 			fp->rx_frag_size = 0;
a8c94b91
VZ
1998 }
1999}
2000
60cad4e6 2001static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2002{
2003 int i;
619c5cb6
VZ
2004 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2005
2006 	/* Prepare the initial contents for the indirection table if RSS is
2007 	 * enabled
2008 	 */
2009 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2010 		bp->rss_conf_obj.ind_table[i] =
2011 			bp->fp->cl_id +
2012 			ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2013
2014 /*
2015 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2016 	 * per-port, so if explicit configuration is needed, do it only
2017 * for a PMF.
2018 *
2019 * For 57712 and newer on the other hand it's a per-function
2020 * configuration.
2021 */
5d317c6a 2022 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2023}
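/* Editorial sketch (not part of the driver): ethtool_rxfh_indir_default()
 * spreads indirection-table slots round-robin over the Rx rings
 * (essentially slot % n_rx_rings), so the loop above maps slot i to
 * cl_id + (i % num_eth_queues). A minimal helper mirroring that
 * assignment, purely for illustration:
 */
static inline u8 example_ind_table_entry(u8 base_cl_id, u32 slot,
					 u32 num_eth_queues)
{
	/* same arithmetic as the table fill in bnx2x_init_rss() */
	return base_cl_id + ethtool_rxfh_indir_default(slot, num_eth_queues);
}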
2024
60cad4e6
AE
2025int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2026 bool config_hash, bool enable)
619c5cb6 2027{
3b603066 2028 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2029
2030 /* Although RSS is meaningless when there is a single HW queue we
2031 * still need it enabled in order to have HW Rx hash generated.
2032 *
2033 * if (!is_eth_multi(bp))
2034 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2035 */
2036
96305234 2037 params.rss_obj = rss_obj;
619c5cb6
VZ
2038
2039 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2040
60cad4e6
AE
2041 if (enable) {
2042 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2043
2044 /* RSS configuration */
2045 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2046 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2047 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2048 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2049 if (rss_obj->udp_rss_v4)
2050 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2051 if (rss_obj->udp_rss_v6)
2052 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2053 } else {
2054 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2055 }
619c5cb6 2056
96305234
DK
2057 /* Hash bits */
2058 params.rss_result_mask = MULTI_MASK;
619c5cb6 2059
5d317c6a 2060 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2061
96305234
DK
2062 if (config_hash) {
2063 /* RSS keys */
60cad4e6 2064 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2065 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2066 }
2067
60cad4e6
AE
2068 if (IS_PF(bp))
2069 return bnx2x_config_rss(bp, &params);
2070 else
2071 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2072}
2073
1191cb83 2074static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2075{
3b603066 2076 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2077
2078 /* Prepare parameters for function state transitions */
2079 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2080
2081 func_params.f_obj = &bp->func_obj;
2082 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2083
2084 func_params.params.hw_init.load_phase = load_code;
2085
2086 return bnx2x_func_state_change(bp, &func_params);
2087}
2088
2089/*
2090 * Cleans the objects that have internal lists, without sending
2091 * ramrods. Should be run when interrupts are disabled.
2092 */
7fa6f340 2093void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2094{
2095 int rc;
2096 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2097 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2098 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2099
2100 /***************** Cleanup MACs' object first *************************/
2101
2102 	/* Wait for completion of the requested commands */
2103 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2104 /* Perform a dry cleanup */
2105 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2106
2107 /* Clean ETH primary MAC */
2108 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2109 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2110 				 &ramrod_flags);
2111 if (rc != 0)
2112 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2113
2114 /* Cleanup UC list */
2115 vlan_mac_flags = 0;
2116 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2117 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2118 &ramrod_flags);
2119 if (rc != 0)
2120 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2121
2122 /***************** Now clean mcast object *****************************/
2123 rparam.mcast_obj = &bp->mcast_obj;
2124 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2125
8b09be5f
YM
2126 /* Add a DEL command... - Since we're doing a driver cleanup only,
2127 * we take a lock surrounding both the initial send and the CONTs,
2128 * as we don't want a true completion to disrupt us in the middle.
2129 */
2130 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2131 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2132 	if (rc < 0)
2133 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2134 			  rc);
619c5cb6
VZ
2135
2136 /* ...and wait until all pending commands are cleared */
2137 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2138 while (rc != 0) {
2139 if (rc < 0) {
2140 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2141 rc);
8b09be5f 2142 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2143 return;
2144 }
2145
2146 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2147 }
8b09be5f 2148 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2149}
2150
2151#ifndef BNX2X_STOP_ON_ERROR
2152#define LOAD_ERROR_EXIT(bp, label) \
2153 do { \
2154 (bp)->state = BNX2X_STATE_ERROR; \
2155 goto label; \
2156 } while (0)
55c11941
MS
2157
2158#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2159 do { \
2160 bp->cnic_loaded = false; \
2161 goto label; \
2162 } while (0)
2163#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2164#define LOAD_ERROR_EXIT(bp, label) \
2165 do { \
2166 (bp)->state = BNX2X_STATE_ERROR; \
2167 (bp)->panic = 1; \
2168 return -EBUSY; \
2169 } while (0)
55c11941
MS
2170#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2171 do { \
2172 bp->cnic_loaded = false; \
2173 (bp)->panic = 1; \
2174 return -EBUSY; \
2175 } while (0)
2176#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2177
ad5afc89
AE
2178static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2179{
2180 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2181 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2182 return;
2183}
2184
2185static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2186{
8db573ba 2187 int num_groups, vf_headroom = 0;
ad5afc89 2188 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2189
ad5afc89
AE
2190 /* number of queues for statistics is number of eth queues + FCoE */
2191 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2192
ad5afc89
AE
2193 /* Total number of FW statistics requests =
2194 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2195 * and fcoe l2 queue) stats + num of queues (which includes another 1
2196 * for fcoe l2 queue if applicable)
2197 */
2198 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2199
8db573ba
AE
2200 /* vf stats appear in the request list, but their data is allocated by
2201 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2202 * it is used to determine where to place the vf stats queries in the
2203 * request struct
2204 */
2205 if (IS_SRIOV(bp))
6411280a 2206 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2207
ad5afc89
AE
2208 /* Request is built from stats_query_header and an array of
2209 * stats_query_cmd_group each of which contains
2210 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2211 * configured in the stats_query_header.
2212 */
2213 	num_groups =
2214 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2215 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2216 		 1 : 0));
2217
8db573ba
AE
2218 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2219 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2220 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2221 num_groups * sizeof(struct stats_query_cmd_group);
2222
2223 /* Data for statistics requests + stats_counter
2224 * stats_counter holds per-STORM counters that are incremented
2225 * when STORM has finished with the current request.
2226 	 * memory for FCoE offloaded statistics is counted anyway,
2227 * even if they will not be sent.
2228 * VF stats are not accounted for here as the data of VF stats is stored
2229 * in memory allocated by the VF, not here.
2230 */
2231 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2232 sizeof(struct per_pf_stats) +
2233 sizeof(struct fcoe_statistics_params) +
2234 sizeof(struct per_queue_stats) * num_queue_stats +
2235 sizeof(struct stats_counter);
2236
2237 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2238 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2239
2240 /* Set shortcuts */
2241 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2242 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2243 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2244 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2245 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2246 bp->fw_stats_req_sz;
2247
6bf07b8e 2248 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2249 U64_HI(bp->fw_stats_req_mapping),
2250 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2251 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2252 U64_HI(bp->fw_stats_data_mapping),
2253 U64_LO(bp->fw_stats_data_mapping));
2254 return 0;
2255
2256alloc_mem_err:
2257 bnx2x_free_fw_stats_mem(bp);
2258 BNX2X_ERR("Can't allocate FW stats memory\n");
2259 return -ENOMEM;
2260}
2261
2262/* send load request to mcp and analyze response */
2263static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2264{
178135c1
DK
2265 u32 param;
2266
ad5afc89
AE
2267 /* init fw_seq */
2268 bp->fw_seq =
2269 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2270 DRV_MSG_SEQ_NUMBER_MASK);
2271 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2272
2273 /* Get current FW pulse sequence */
2274 bp->fw_drv_pulse_wr_seq =
2275 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2276 DRV_PULSE_SEQ_MASK);
2277 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2278
178135c1
DK
2279 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2280
2281 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2282 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2283
ad5afc89 2284 /* load request */
178135c1 2285 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2286
2287 /* if mcp fails to respond we must abort */
2288 if (!(*load_code)) {
2289 BNX2X_ERR("MCP response failure, aborting\n");
2290 return -EBUSY;
2291 }
2292
2293 /* If mcp refused (e.g. other port is in diagnostic mode) we
2294 * must abort
2295 */
2296 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2297 BNX2X_ERR("MCP refused load request, aborting\n");
2298 return -EBUSY;
2299 }
2300 return 0;
2301}
2302
2303/* check whether another PF has already loaded FW to chip. In
2304 * virtualized environments a pf from another VM may have already
2305 * initialized the device including loading FW
2306 */
91ebb929 2307int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2308{
2309 /* is another pf loaded on this engine? */
2310 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2311 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2312 /* build my FW version dword */
2313 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2314 (BCM_5710_FW_MINOR_VERSION << 8) +
2315 (BCM_5710_FW_REVISION_VERSION << 16) +
2316 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2317
2318 /* read loaded FW from chip */
2319 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2320
2321 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2322 loaded_fw, my_fw);
2323
2324 /* abort nic load if version mismatch */
2325 if (my_fw != loaded_fw) {
91ebb929
YM
2326 if (print_err)
2327 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2328 loaded_fw, my_fw);
2329 else
2330 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2331 loaded_fw, my_fw);
ad5afc89
AE
2332 return -EBUSY;
2333 }
2334 }
2335 return 0;
2336}
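/* Editorial note (worked example, hypothetical version): the dword compared
 * above packs the four version bytes low-to-high, i.e.
 *   major + (minor << 8) + (rev << 16) + (eng << 24)
 * so a hypothetical FW 7.8.19.0 would read back as
 *   7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807
 * and any PF whose own dword differs refuses to load.
 */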
2337
2338/* returns the "mcp load_code" according to global load_count array */
2339static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2340{
2341 int path = BP_PATH(bp);
2342
2343 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2344 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2345 bnx2x_load_count[path][2]);
2346 bnx2x_load_count[path][0]++;
2347 bnx2x_load_count[path][1 + port]++;
ad5afc89 2348 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2349 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2350 bnx2x_load_count[path][2]);
2351 	if (bnx2x_load_count[path][0] == 1)
2352 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2353 	else if (bnx2x_load_count[path][1 + port] == 1)
2354 		return FW_MSG_CODE_DRV_LOAD_PORT;
2355 	else
2356 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2357}
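/* Editorial note (assumed load sequence): with no MCP, the counters above
 * give the first PF to load on a path FW_MSG_CODE_DRV_LOAD_COMMON, the
 * first PF on the path's other port FW_MSG_CODE_DRV_LOAD_PORT, and every
 * later PF on either port FW_MSG_CODE_DRV_LOAD_FUNCTION.
 */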
2358
2359/* mark PMF if applicable */
2360static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2361{
2362 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2363 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2364 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2365 bp->port.pmf = 1;
2366 /* We need the barrier to ensure the ordering between the
2367 * writing to bp->port.pmf here and reading it from the
2368 * bnx2x_periodic_task().
2369 */
2370 smp_mb();
2371 } else {
2372 bp->port.pmf = 0;
452427b0
YM
2373 }
2374
ad5afc89
AE
2375 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2376}
2377
2378static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2379{
2380 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2381 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2382 (bp->common.shmem2_base)) {
2383 if (SHMEM2_HAS(bp, dcc_support))
2384 SHMEM2_WR(bp, dcc_support,
2385 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2386 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2387 if (SHMEM2_HAS(bp, afex_driver_support))
2388 SHMEM2_WR(bp, afex_driver_support,
2389 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2390 }
2391
2392 /* Set AFEX default VLAN tag to an invalid value */
2393 bp->afex_def_vlan_tag = -1;
452427b0
YM
2394}
2395
1191cb83
ED
2396/**
2397 * bnx2x_bz_fp - zero content of the fastpath structure.
2398 *
2399 * @bp: driver handle
2400 * @index: fastpath index to be zeroed
2401 *
2402 * Makes sure the contents of bp->fp[index].napi are kept
2403 * intact.
2404 */
2405static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2406{
2407 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2408 int cos;
1191cb83 2409 struct napi_struct orig_napi = fp->napi;
15192a8c 2410 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2411
1191cb83 2412 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2413 if (fp->tpa_info)
2414 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2415 sizeof(struct bnx2x_agg_info));
2416 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2417
2418 /* Restore the NAPI object as it has been already initialized */
2419 fp->napi = orig_napi;
15192a8c 2420 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2421 fp->bp = bp;
2422 fp->index = index;
2423 if (IS_ETH_FP(fp))
2424 fp->max_cos = bp->max_cos;
2425 else
2426 /* Special queues support only one CoS */
2427 fp->max_cos = 1;
2428
65565884 2429 /* Init txdata pointers */
65565884
MS
2430 if (IS_FCOE_FP(fp))
2431 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2432 if (IS_ETH_FP(fp))
2433 for_each_cos_in_tx_queue(fp, cos)
2434 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2435 BNX2X_NUM_ETH_QUEUES(bp) + index];
2436
2437 	/* Set the tpa flag for each queue. The tpa flag determines the minimal
2438 	 * queue size, so it must be set prior to queue memory allocation.
2439 	 */
2440 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2441 (bp->flags & GRO_ENABLE_FLAG &&
2442 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2443 if (bp->flags & TPA_ENABLE_FLAG)
2444 fp->mode = TPA_MODE_LRO;
2445 else if (bp->flags & GRO_ENABLE_FLAG)
2446 fp->mode = TPA_MODE_GRO;
2447
1191cb83
ED
2448 /* We don't want TPA on an FCoE L2 ring */
2449 if (IS_FCOE_FP(fp))
2450 fp->disable_tpa = 1;
55c11941
MS
2451}
2452
2453int bnx2x_load_cnic(struct bnx2x *bp)
2454{
2455 int i, rc, port = BP_PORT(bp);
2456
2457 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2458
2459 mutex_init(&bp->cnic_mutex);
2460
ad5afc89
AE
2461 if (IS_PF(bp)) {
2462 rc = bnx2x_alloc_mem_cnic(bp);
2463 if (rc) {
2464 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2465 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2466 }
55c11941
MS
2467 }
2468
2469 rc = bnx2x_alloc_fp_mem_cnic(bp);
2470 if (rc) {
2471 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2472 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2473 }
2474
2475 /* Update the number of queues with the cnic queues */
2476 rc = bnx2x_set_real_num_queues(bp, 1);
2477 if (rc) {
2478 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2479 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2480 }
2481
2482 /* Add all CNIC NAPI objects */
2483 bnx2x_add_all_napi_cnic(bp);
2484 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2485 bnx2x_napi_enable_cnic(bp);
2486
2487 rc = bnx2x_init_hw_func_cnic(bp);
2488 if (rc)
2489 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2490
2491 bnx2x_nic_init_cnic(bp);
2492
ad5afc89
AE
2493 if (IS_PF(bp)) {
2494 /* Enable Timer scan */
2495 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2496
2497 /* setup cnic queues */
2498 for_each_cnic_queue(bp, i) {
2499 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2500 if (rc) {
2501 BNX2X_ERR("Queue setup failed\n");
2502 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2503 }
55c11941
MS
2504 }
2505 }
2506
2507 /* Initialize Rx filter. */
8b09be5f 2508 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2509
2510 /* re-read iscsi info */
2511 bnx2x_get_iscsi_info(bp);
2512 bnx2x_setup_cnic_irq_info(bp);
2513 bnx2x_setup_cnic_info(bp);
2514 bp->cnic_loaded = true;
2515 if (bp->state == BNX2X_STATE_OPEN)
2516 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2517
55c11941
MS
2518 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2519
2520 return 0;
2521
2522#ifndef BNX2X_STOP_ON_ERROR
2523load_error_cnic2:
2524 /* Disable Timer scan */
2525 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2526
2527load_error_cnic1:
2528 bnx2x_napi_disable_cnic(bp);
2529 /* Update the number of queues without the cnic queues */
d9d81862 2530 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2531 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2532load_error_cnic0:
2533 BNX2X_ERR("CNIC-related load failed\n");
2534 bnx2x_free_fp_mem_cnic(bp);
2535 bnx2x_free_mem_cnic(bp);
2536 return rc;
2537#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2538}
2539
9f6c9258
DK
2540/* must be called with rtnl_lock */
2541int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2542{
619c5cb6 2543 int port = BP_PORT(bp);
ad5afc89 2544 int i, rc = 0, load_code = 0;
9f6c9258 2545
55c11941
MS
2546 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2547 DP(NETIF_MSG_IFUP,
2548 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2549
9f6c9258 2550#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2551 if (unlikely(bp->panic)) {
2552 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2553 return -EPERM;
51c1a580 2554 }
9f6c9258
DK
2555#endif
2556
2557 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2558
16a5fd92 2559 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2560 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2561 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2562 &bp->last_reported_link.link_report_flags);
2ae17f66 2563
ad5afc89
AE
2564 if (IS_PF(bp))
2565 /* must be called before memory allocation and HW init */
2566 bnx2x_ilt_set_info(bp);
523224a3 2567
6383c0b3
AE
2568 /*
2569 * Zero fastpath structures preserving invariants like napi, which are
2570 * allocated only once, fp index, max_cos, bp pointer.
65565884 2571 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2572 */
51c1a580 2573 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2574 for_each_queue(bp, i)
2575 bnx2x_bz_fp(bp, i);
55c11941
MS
2576 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2577 bp->num_cnic_queues) *
2578 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2579
55c11941 2580 bp->fcoe_init = false;
6383c0b3 2581
a8c94b91
VZ
2582 /* Set the receive queues buffer size */
2583 bnx2x_set_rx_buf_size(bp);
2584
ad5afc89
AE
2585 if (IS_PF(bp)) {
2586 rc = bnx2x_alloc_mem(bp);
2587 if (rc) {
2588 BNX2X_ERR("Unable to allocate bp memory\n");
2589 return rc;
2590 }
2591 }
2592
ad5afc89
AE
2593 	/* Needs to be done after alloc mem, since it self-adjusts to the amount
2594 	 * of memory available for RSS queues.
2595 	 */
2596 rc = bnx2x_alloc_fp_mem(bp);
2597 if (rc) {
2598 BNX2X_ERR("Unable to allocate memory for fps\n");
2599 LOAD_ERROR_EXIT(bp, load_error0);
2600 }
d6214d7a 2601
e3ed4eae
DK
2602 	/* Allocate memory for FW statistics */
2603 if (bnx2x_alloc_fw_stats_mem(bp))
2604 LOAD_ERROR_EXIT(bp, load_error0);
2605
8d9ac297
AE
2606 /* request pf to initialize status blocks */
2607 if (IS_VF(bp)) {
2608 rc = bnx2x_vfpf_init(bp);
2609 if (rc)
2610 LOAD_ERROR_EXIT(bp, load_error0);
2611 }
2612
b3b83c3f
DK
2613 /* As long as bnx2x_alloc_mem() may possibly update
2614 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2615 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2616 */
55c11941 2617 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2618 if (rc) {
ec6ba945 2619 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2620 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2621 }
2622
6383c0b3 2623 /* configure multi cos mappings in kernel.
16a5fd92
YM
2624 * this configuration may be overridden by a multi class queue
2625 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2626 */
2627 bnx2x_setup_tc(bp->dev, bp->max_cos);
2628
26614ba5
MS
2629 /* Add all NAPI objects */
2630 bnx2x_add_all_napi(bp);
55c11941 2631 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2632 bnx2x_napi_enable(bp);
2633
ad5afc89
AE
2634 if (IS_PF(bp)) {
2635 /* set pf load just before approaching the MCP */
2636 bnx2x_set_pf_load(bp);
2637
2638 /* if mcp exists send load request and analyze response */
2639 if (!BP_NOMCP(bp)) {
2640 /* attempt to load pf */
2641 rc = bnx2x_nic_load_request(bp, &load_code);
2642 if (rc)
2643 LOAD_ERROR_EXIT(bp, load_error1);
2644
2645 /* what did mcp say? */
91ebb929 2646 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2647 if (rc) {
2648 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2649 LOAD_ERROR_EXIT(bp, load_error2);
2650 }
ad5afc89
AE
2651 } else {
2652 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2653 }
9f6c9258 2654
ad5afc89
AE
2655 /* mark pmf if applicable */
2656 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2657
ad5afc89
AE
2658 /* Init Function state controlling object */
2659 bnx2x__init_func_obj(bp);
6383c0b3 2660
ad5afc89
AE
2661 /* Initialize HW */
2662 rc = bnx2x_init_hw(bp, load_code);
2663 if (rc) {
2664 BNX2X_ERR("HW init failed, aborting\n");
2665 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2666 LOAD_ERROR_EXIT(bp, load_error2);
2667 }
9f6c9258
DK
2668 }
2669
ecf01c22
YM
2670 bnx2x_pre_irq_nic_init(bp);
2671
d6214d7a
DK
2672 /* Connect to IRQs */
2673 rc = bnx2x_setup_irqs(bp);
523224a3 2674 if (rc) {
ad5afc89
AE
2675 BNX2X_ERR("setup irqs failed\n");
2676 if (IS_PF(bp))
2677 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2678 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2679 }
2680
619c5cb6 2681 /* Init per-function objects */
ad5afc89 2682 if (IS_PF(bp)) {
ecf01c22
YM
2683 /* Setup NIC internals and enable interrupts */
2684 bnx2x_post_irq_nic_init(bp, load_code);
2685
ad5afc89 2686 bnx2x_init_bp_objs(bp);
b56e9670 2687 bnx2x_iov_nic_init(bp);
a3348722 2688
ad5afc89
AE
2689 /* Set AFEX default VLAN tag to an invalid value */
2690 bp->afex_def_vlan_tag = -1;
2691 bnx2x_nic_load_afex_dcc(bp, load_code);
2692 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2693 rc = bnx2x_func_start(bp);
2694 if (rc) {
2695 BNX2X_ERR("Function start failed!\n");
2696 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2697
619c5cb6 2698 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2699 }
9f6c9258 2700
ad5afc89
AE
2701 /* Send LOAD_DONE command to MCP */
2702 if (!BP_NOMCP(bp)) {
2703 load_code = bnx2x_fw_command(bp,
2704 DRV_MSG_CODE_LOAD_DONE, 0);
2705 if (!load_code) {
2706 BNX2X_ERR("MCP response failure, aborting\n");
2707 rc = -EBUSY;
2708 LOAD_ERROR_EXIT(bp, load_error3);
2709 }
2710 }
9f6c9258 2711
0c14e5ce
AE
2712 /* initialize FW coalescing state machines in RAM */
2713 bnx2x_update_coalesce(bp);
60cad4e6 2714 }
0c14e5ce 2715
60cad4e6
AE
2716 /* setup the leading queue */
2717 rc = bnx2x_setup_leading(bp);
2718 if (rc) {
2719 BNX2X_ERR("Setup leading failed!\n");
2720 LOAD_ERROR_EXIT(bp, load_error3);
2721 }
ad5afc89 2722
60cad4e6
AE
2723 /* set up the rest of the queues */
2724 for_each_nondefault_eth_queue(bp, i) {
2725 if (IS_PF(bp))
2726 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2727 else /* VF */
2728 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2729 if (rc) {
60cad4e6 2730 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2731 LOAD_ERROR_EXIT(bp, load_error3);
2732 }
60cad4e6 2733 }
8d9ac297 2734
60cad4e6
AE
2735 /* setup rss */
2736 rc = bnx2x_init_rss(bp);
2737 if (rc) {
2738 BNX2X_ERR("PF RSS init failed\n");
2739 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2740 }
619c5cb6 2741
523224a3
DK
2742 /* Now when Clients are configured we are ready to work */
2743 bp->state = BNX2X_STATE_OPEN;
2744
619c5cb6 2745 /* Configure a ucast MAC */
ad5afc89
AE
2746 if (IS_PF(bp))
2747 rc = bnx2x_set_eth_mac(bp, true);
2748 	else /* vf */
2749 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2750 					   true);
51c1a580
MS
2751 if (rc) {
2752 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2753 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2754 }
6e30dd4e 2755
ad5afc89 2756 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2757 bnx2x_update_max_mf_config(bp, bp->pending_max);
2758 bp->pending_max = 0;
2759 }
2760
ad5afc89
AE
2761 if (bp->port.pmf) {
2762 rc = bnx2x_initial_phy_init(bp, load_mode);
2763 if (rc)
2764 LOAD_ERROR_EXIT(bp, load_error3);
2765 }
c63da990 2766 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2767
619c5cb6
VZ
2768 /* Start fast path */
2769
2770 /* Initialize Rx filter. */
8b09be5f 2771 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2772
619c5cb6 2773 /* Start the Tx */
9f6c9258
DK
2774 switch (load_mode) {
2775 case LOAD_NORMAL:
16a5fd92 2776 /* Tx queue should be only re-enabled */
523224a3 2777 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2778 break;
2779
2780 case LOAD_OPEN:
2781 netif_tx_start_all_queues(bp->dev);
523224a3 2782 smp_mb__after_clear_bit();
9f6c9258
DK
2783 break;
2784
2785 case LOAD_DIAG:
8970b2e4 2786 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2787 bp->state = BNX2X_STATE_DIAG;
2788 break;
2789
2790 default:
2791 break;
2792 }
2793
2794 	if (bp->port.pmf)
2795 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2796 	else
2797 		bnx2x__link_status_update(bp);
2798
2799 /* start the timer */
2800 mod_timer(&bp->timer, jiffies + bp->current_interval);
2801
55c11941
MS
2802 if (CNIC_ENABLED(bp))
2803 bnx2x_load_cnic(bp);
9f6c9258 2804
ad5afc89
AE
2805 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2806 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2807 u32 val;
2808 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2809 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2810 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2811 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2812 }
2813
619c5cb6 2814 /* Wait for all pending SP commands to complete */
ad5afc89 2815 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2816 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2817 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2818 return -EBUSY;
2819 }
6891dd25 2820
9876879f
BW
2821 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2822 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2823 bnx2x_dcbx_init(bp, false);
2824
55c11941
MS
2825 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2826
9f6c9258
DK
2827 return 0;
2828
619c5cb6 2829#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2830load_error3:
ad5afc89
AE
2831 if (IS_PF(bp)) {
2832 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2833
ad5afc89
AE
2834 /* Clean queueable objects */
2835 bnx2x_squeeze_objects(bp);
2836 }
619c5cb6 2837
9f6c9258
DK
2838 /* Free SKBs, SGEs, TPA pool and driver internals */
2839 bnx2x_free_skbs(bp);
ec6ba945 2840 for_each_rx_queue(bp, i)
9f6c9258 2841 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2842
9f6c9258 2843 /* Release IRQs */
d6214d7a
DK
2844 bnx2x_free_irq(bp);
2845load_error2:
ad5afc89 2846 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2847 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2848 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2849 }
2850
2851 bp->port.pmf = 0;
9f6c9258
DK
2852load_error1:
2853 bnx2x_napi_disable(bp);
722c6f58 2854 bnx2x_del_all_napi(bp);
ad5afc89 2855
889b9af3 2856 /* clear pf_load status, as it was already set */
ad5afc89
AE
2857 if (IS_PF(bp))
2858 bnx2x_clear_pf_load(bp);
d6214d7a 2859load_error0:
ad5afc89 2860 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2861 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2862 bnx2x_free_mem(bp);
2863
2864 return rc;
619c5cb6 2865#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2866}
2867
7fa6f340 2868int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2869{
2870 u8 rc = 0, cos, i;
2871
2872 /* Wait until tx fastpath tasks complete */
2873 for_each_tx_queue(bp, i) {
2874 struct bnx2x_fastpath *fp = &bp->fp[i];
2875
2876 for_each_cos_in_tx_queue(fp, cos)
2877 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2878 if (rc)
2879 return rc;
2880 }
2881 return 0;
2882}
2883
9f6c9258 2884/* must be called with rtnl_lock */
5d07d868 2885int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2886{
2887 int i;
c9ee9206
VZ
2888 bool global = false;
2889
55c11941
MS
2890 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2891
9ce392d4 2892 /* mark driver is unloaded in shmem2 */
ad5afc89 2893 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2894 u32 val;
2895 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2896 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2897 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2898 }
2899
80bfe5cc 2900 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2901 (bp->state == BNX2X_STATE_CLOSED ||
2902 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2903 /* We can get here if the driver has been unloaded
2904 * during parity error recovery and is either waiting for a
2905 * leader to complete or for other functions to unload and
2906 * then ifdown has been issued. In this case we want to
2907 * unload and let other functions to complete a recovery
2908 * process.
2909 */
9f6c9258
DK
2910 bp->recovery_state = BNX2X_RECOVERY_DONE;
2911 bp->is_leader = 0;
c9ee9206
VZ
2912 bnx2x_release_leader_lock(bp);
2913 smp_mb();
2914
51c1a580
MS
2915 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2916 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2917 return -EINVAL;
2918 }
2919
2920 	/* Nothing to do during unload if the previous bnx2x_nic_load()
2921 	 * did not complete successfully - all resources are released.
2922 	 *
2923 	 * We can get here only after an unsuccessful ndo_* callback, during which
2924 	 * the dev->IFF_UP flag is still on.
2925 	 */
2926 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2927 return 0;
2928
2929 	/* It's important to set bp->state to a value different from
2930 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2931 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2932 	 */
2933 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2934 smp_mb();
2935
78c3bcc5
AE
2936 /* indicate to VFs that the PF is going down */
2937 bnx2x_iov_channel_down(bp);
2938
55c11941
MS
2939 if (CNIC_LOADED(bp))
2940 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2941
9505ee37
VZ
2942 /* Stop Tx */
2943 bnx2x_tx_disable(bp);
65565884 2944 netdev_reset_tc(bp->dev);
9505ee37 2945
9f6c9258 2946 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2947
9f6c9258 2948 del_timer_sync(&bp->timer);
f85582f8 2949
ad5afc89
AE
2950 if (IS_PF(bp)) {
2951 /* Set ALWAYS_ALIVE bit in shmem */
2952 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2953 bnx2x_drv_pulse(bp);
2954 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2955 bnx2x_save_statistics(bp);
2956 }
9f6c9258 2957
ad5afc89
AE
2958 /* wait till consumers catch up with producers in all queues */
2959 bnx2x_drain_tx_queues(bp);
9f6c9258 2960
9b176b6b
AE
2961 	/* if VF, indicate to the PF that this function is going down (the PF
2962 	 * will delete sp elements and clear initializations)
2963 	 */
2964 if (IS_VF(bp))
2965 bnx2x_vfpf_close_vf(bp);
2966 else if (unload_mode != UNLOAD_RECOVERY)
2967 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2968 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2969 else {
c9ee9206
VZ
2970 /* Send the UNLOAD_REQUEST to the MCP */
2971 bnx2x_send_unload_req(bp, unload_mode);
2972
16a5fd92 2973 /* Prevent transactions to host from the functions on the
c9ee9206 2974 * engine that doesn't reset global blocks in case of global
16a5fd92 2975 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2976 * (the engine which leader will perform the recovery
2977 * last).
2978 */
2979 if (!CHIP_IS_E1x(bp))
2980 bnx2x_pf_disable(bp);
2981
2982 /* Disable HW interrupts, NAPI */
523224a3 2983 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2984 /* Delete all NAPI objects */
2985 bnx2x_del_all_napi(bp);
55c11941
MS
2986 if (CNIC_LOADED(bp))
2987 bnx2x_del_all_napi_cnic(bp);
523224a3 2988 /* Release IRQs */
d6214d7a 2989 bnx2x_free_irq(bp);
c9ee9206
VZ
2990
2991 /* Report UNLOAD_DONE to MCP */
5d07d868 2992 bnx2x_send_unload_done(bp, false);
523224a3 2993 }
9f6c9258 2994
2995 	/*
2996 	 * At this stage no more interrupts will arrive so we may safely clean
2997 	 * the queueable objects here in case they failed to get cleaned so far.
2998 	 */
ad5afc89
AE
2999 if (IS_PF(bp))
3000 bnx2x_squeeze_objects(bp);
619c5cb6 3001
79616895
VZ
3002 /* There should be no more pending SP commands at this stage */
3003 bp->sp_state = 0;
3004
9f6c9258
DK
3005 bp->port.pmf = 0;
3006
a0d307b2
DK
3007 /* clear pending work in rtnl task */
3008 bp->sp_rtnl_state = 0;
3009 smp_mb();
3010
9f6c9258
DK
3011 /* Free SKBs, SGEs, TPA pool and driver internals */
3012 bnx2x_free_skbs(bp);
55c11941
MS
3013 if (CNIC_LOADED(bp))
3014 bnx2x_free_skbs_cnic(bp);
ec6ba945 3015 for_each_rx_queue(bp, i)
9f6c9258 3016 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3017
ad5afc89
AE
3018 bnx2x_free_fp_mem(bp);
3019 if (CNIC_LOADED(bp))
55c11941 3020 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3021
ad5afc89 3022 if (IS_PF(bp)) {
ad5afc89
AE
3023 if (CNIC_LOADED(bp))
3024 bnx2x_free_mem_cnic(bp);
3025 }
b4cddbd6
AE
3026 bnx2x_free_mem(bp);
3027
9f6c9258 3028 bp->state = BNX2X_STATE_CLOSED;
55c11941 3029 bp->cnic_loaded = false;
9f6c9258 3030
c9ee9206
VZ
3031 /* Check if there are pending parity attentions. If there are - set
3032 * RECOVERY_IN_PROGRESS.
3033 */
ad5afc89 3034 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3035 bnx2x_set_reset_in_progress(bp);
3036
3037 /* Set RESET_IS_GLOBAL if needed */
3038 if (global)
3039 bnx2x_set_reset_global(bp);
3040 }
3041
9f6c9258
DK
3042 /* The last driver must disable a "close the gate" if there is no
3043 * parity attention or "process kill" pending.
3044 */
ad5afc89
AE
3045 if (IS_PF(bp) &&
3046 !bnx2x_clear_pf_load(bp) &&
3047 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3048 bnx2x_disable_close_the_gate(bp);
3049
55c11941
MS
3050 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3051
9f6c9258
DK
3052 return 0;
3053}
f85582f8 3054
9f6c9258
DK
3055int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3056{
3057 u16 pmcsr;
3058
adf5f6a1 3059 /* If there is no power capability, silently succeed */
29ed74c3 3060 if (!bp->pdev->pm_cap) {
51c1a580 3061 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3062 return 0;
3063 }
3064
29ed74c3 3065 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3066
3067 switch (state) {
3068 case PCI_D0:
29ed74c3 3069 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3070 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3071 PCI_PM_CTRL_PME_STATUS));
3072
3073 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3074 /* delay required during transition out of D3hot */
3075 msleep(20);
3076 break;
3077
3078 case PCI_D3hot:
3079 		/* If there are other clients above, don't
3080 		 * shut down the power */
3081 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3082 return 0;
3083 /* Don't shut down the power for emulation and FPGA */
3084 if (CHIP_REV_IS_SLOW(bp))
3085 return 0;
3086
3087 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3088 pmcsr |= 3;
3089
3090 if (bp->wol)
3091 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3092
29ed74c3 3093 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3094 pmcsr);
3095
3096 /* No more memory access after this point until
3097 * device is brought back to D0.
3098 */
3099 break;
3100
3101 default:
51c1a580 3102 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3103 return -EINVAL;
3104 }
3105 return 0;
3106}
3107
9f6c9258
DK
3108/*
3109 * net_device service functions
3110 */
a8f47eb7 3111static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3112{
3113 int work_done = 0;
6383c0b3 3114 u8 cos;
9f6c9258
DK
3115 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3116 napi);
3117 struct bnx2x *bp = fp->bp;
3118
3119 while (1) {
3120#ifdef BNX2X_STOP_ON_ERROR
3121 if (unlikely(bp->panic)) {
3122 napi_complete(napi);
3123 return 0;
3124 }
3125#endif
8f20aa57
DK
3126 if (!bnx2x_fp_lock_napi(fp))
3127 return work_done;
9f6c9258 3128
6383c0b3 3129 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3130 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3131 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3132
9f6c9258
DK
3133 if (bnx2x_has_rx_work(fp)) {
3134 work_done += bnx2x_rx_int(fp, budget - work_done);
3135
3136 /* must not complete if we consumed full budget */
8f20aa57
DK
3137 if (work_done >= budget) {
3138 bnx2x_fp_unlock_napi(fp);
9f6c9258 3139 break;
8f20aa57 3140 }
9f6c9258
DK
3141 }
3142
3143 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3144 if (!bnx2x_fp_unlock_napi(fp) &&
3145 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3146
ec6ba945
VZ
3147 /* No need to update SB for FCoE L2 ring as long as
3148 * it's connected to the default SB and the SB
3149 * has been updated when NAPI was scheduled.
3150 */
3151 if (IS_FCOE_FP(fp)) {
3152 napi_complete(napi);
3153 break;
3154 }
9f6c9258 3155 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3156 /* bnx2x_has_rx_work() reads the status block,
3157 * thus we need to ensure that status block indices
3158 * have been actually read (bnx2x_update_fpsb_idx)
3159 * prior to this check (bnx2x_has_rx_work) so that
3160 * we won't write the "newer" value of the status block
3161 * to IGU (if there was a DMA right after
3162 * bnx2x_has_rx_work and if there is no rmb, the memory
3163 * reading (bnx2x_update_fpsb_idx) may be postponed
3164 * to right before bnx2x_ack_sb). In this case there
3165 * will never be another interrupt until there is
3166 * another update of the status block, while there
3167 * is still unhandled work.
3168 */
9f6c9258
DK
3169 rmb();
3170
3171 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3172 napi_complete(napi);
3173 /* Re-enable interrupts */
51c1a580 3174 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3175 "Update index to %d\n", fp->fp_hc_idx);
3176 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3177 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3178 IGU_INT_ENABLE, 1);
3179 break;
3180 }
3181 }
3182 }
3183
3184 return work_done;
3185}
3186
e0d1095a 3187#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3188/* must be called with local_bh_disable()d */
3189int bnx2x_low_latency_recv(struct napi_struct *napi)
3190{
3191 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3192 napi);
3193 struct bnx2x *bp = fp->bp;
3194 int found = 0;
3195
3196 if ((bp->state == BNX2X_STATE_CLOSED) ||
3197 (bp->state == BNX2X_STATE_ERROR) ||
3198 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3199 return LL_FLUSH_FAILED;
3200
3201 if (!bnx2x_fp_lock_poll(fp))
3202 return LL_FLUSH_BUSY;
3203
75b29459 3204 if (bnx2x_has_rx_work(fp))
8f20aa57 3205 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3206
3207 bnx2x_fp_unlock_poll(fp);
3208
3209 return found;
3210}
3211#endif
3212
9f6c9258
DK
3213/* we split the first BD into headers and data BDs
3214 * to ease the pain of our fellow microcode engineers
3215 * we use one mapping for both BDs
9f6c9258 3216 */
91226790
DK
3217static u16 bnx2x_tx_split(struct bnx2x *bp,
3218 struct bnx2x_fp_txdata *txdata,
3219 struct sw_tx_bd *tx_buf,
3220 struct eth_tx_start_bd **tx_bd, u16 hlen,
3221 u16 bd_prod)
9f6c9258
DK
3222{
3223 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3224 struct eth_tx_bd *d_tx_bd;
3225 dma_addr_t mapping;
3226 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3227
3228 /* first fix first BD */
9f6c9258
DK
3229 h_tx_bd->nbytes = cpu_to_le16(hlen);
3230
91226790
DK
3231 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3232 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3233
3234 /* now get a new data BD
3235 * (after the pbd) and fill it */
3236 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3237 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3238
3239 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3240 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3241
3242 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3243 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3244 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3245
3246 /* this marks the BD as one that has no individual mapping */
3247 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3248
3249 DP(NETIF_MSG_TX_QUEUED,
3250 "TSO split data size is %d (%x:%x)\n",
3251 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3252
3253 /* update tx_bd */
3254 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3255
3256 return bd_prod;
3257}
3258
86564c3f
YM
3259#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3260#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3261static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3262{
86564c3f
YM
3263 __sum16 tsum = (__force __sum16) csum;
3264
3265 	if (fix > 0)
3266 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3267 				  csum_partial(t_header - fix, fix, 0)));
3268 
3269 	else if (fix < 0)
3270 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3271 				  csum_partial(t_header, -fix, 0)));
9f6c9258 3272
e2593fcd 3273 return bswab16(tsum);
9f6c9258
DK
3274}
3275
91226790 3276static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3277{
3278 u32 rc;
a848ade4
DK
3279 __u8 prot = 0;
3280 __be16 protocol;
9f6c9258
DK
3281
3282 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3283 return XMIT_PLAIN;
9f6c9258 3284
a848ade4
DK
3285 protocol = vlan_get_protocol(skb);
3286 if (protocol == htons(ETH_P_IPV6)) {
3287 rc = XMIT_CSUM_V6;
3288 prot = ipv6_hdr(skb)->nexthdr;
3289 } else {
3290 rc = XMIT_CSUM_V4;
3291 prot = ip_hdr(skb)->protocol;
3292 }
9f6c9258 3293
a848ade4
DK
3294 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3295 if (inner_ip_hdr(skb)->version == 6) {
3296 rc |= XMIT_CSUM_ENC_V6;
3297 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3298 rc |= XMIT_CSUM_TCP;
3299 		} else {
3300 			rc |= XMIT_CSUM_ENC_V4;
3301 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3302 				rc |= XMIT_CSUM_TCP;
3303 }
3304 }
a848ade4
DK
3305 if (prot == IPPROTO_TCP)
3306 rc |= XMIT_CSUM_TCP;
9f6c9258 3307
36a8f39e
ED
3308 if (skb_is_gso(skb)) {
3309 if (skb_is_gso_v6(skb)) {
3310 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3311 if (rc & XMIT_CSUM_ENC)
3312 rc |= XMIT_GSO_ENC_V6;
3313 } else {
3314 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3315 if (rc & XMIT_CSUM_ENC)
3316 rc |= XMIT_GSO_ENC_V4;
3317 }
a848ade4 3318 }
9f6c9258
DK
3319
3320 return rc;
3321}
3322
3323#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3324/* Check if the packet requires linearization (the packet is too fragmented).
3325   There is no need to check fragmentation if the page size > 8K, since there
3326   can be no violation of the FW restrictions in that case. */
3327static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3328 u32 xmit_type)
3329{
3330 int to_copy = 0;
3331 int hlen = 0;
3332 int first_bd_sz = 0;
3333
3334 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3335 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3336
3337 if (xmit_type & XMIT_GSO) {
3338 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3339 /* Check if LSO packet needs to be copied:
3340 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3341 int wnd_size = MAX_FETCH_BD - 3;
3342 /* Number of windows to check */
3343 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3344 int wnd_idx = 0;
3345 int frag_idx = 0;
3346 u32 wnd_sum = 0;
3347
3348 /* Headers length */
3349 hlen = (int)(skb_transport_header(skb) - skb->data) +
3350 tcp_hdrlen(skb);
3351
3352 /* Amount of data (w/o headers) on linear part of SKB*/
3353 first_bd_sz = skb_headlen(skb) - hlen;
3354
3355 wnd_sum = first_bd_sz;
3356
3357 /* Calculate the first sum - it's special */
3358 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3359 wnd_sum +=
9e903e08 3360 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3361
3362 /* If there was data on linear skb data - check it */
3363 if (first_bd_sz > 0) {
3364 if (unlikely(wnd_sum < lso_mss)) {
3365 to_copy = 1;
3366 goto exit_lbl;
3367 }
3368
3369 wnd_sum -= first_bd_sz;
3370 }
3371
3372 /* Others are easier: run through the frag list and
3373 check all windows */
3374 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3375 wnd_sum +=
9e903e08 3376 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3377
3378 if (unlikely(wnd_sum < lso_mss)) {
3379 to_copy = 1;
3380 break;
3381 }
3382 wnd_sum -=
9e903e08 3383 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3384 }
3385 } else {
3386 			/* in the non-LSO case a too fragmented packet should
3387 			   always be linearized */
3388 to_copy = 1;
3389 }
3390 }
3391
3392exit_lbl:
3393 if (unlikely(to_copy))
3394 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3395 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3396 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3397 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3398
3399 return to_copy;
3400}
3401#endif
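/* Editorial sketch (not part of the driver): the check above slides a
 * window of (MAX_FETCH_BD - 3) buffer sizes over the packet and flags it
 * for linearization when any window sums to less than one LSO segment.
 * The same idea reduced to plain arrays, with caller-supplied sizes,
 * window length and mss (all hypothetical inputs):
 */
static bool example_needs_linearization(const u32 *sizes, int nbufs,
					int wnd, u32 mss)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < nbufs; i++) {
		sum += sizes[i];		/* grow the window */
		if (i >= wnd)
			sum -= sizes[i - wnd];	/* drop the oldest entry */
		if (i >= wnd - 1 && sum < mss)
			return true;		/* a full window is smaller than one segment */
	}
	return false;
}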
3402
91226790
DK
3403static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3404 u32 xmit_type)
f2e0899f 3405{
a848ade4
DK
3406 struct ipv6hdr *ipv6;
3407
2297a2da
VZ
3408 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3409 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3410 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3411
3412 if (xmit_type & XMIT_GSO_ENC_V6)
3413 ipv6 = inner_ipv6_hdr(skb);
3414 else if (xmit_type & XMIT_GSO_V6)
3415 ipv6 = ipv6_hdr(skb);
3416 else
3417 ipv6 = NULL;
3418
3419 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3420 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3421}
3422
3423/**
e8920674 3424 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3425 *
e8920674
DK
3426 * @skb: packet skb
3427 * @pbd: parse BD
3428 * @xmit_type: xmit flags
f2e0899f 3429 */
91226790
DK
3430static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3431 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3432 struct eth_tx_start_bd *tx_start_bd,
91226790 3433 u32 xmit_type)
f2e0899f
DK
3434{
3435 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3436 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3437 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3438
3439 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3440 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3441 pbd->tcp_pseudo_csum =
86564c3f
YM
3442 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3443 ip_hdr(skb)->daddr,
3444 0, IPPROTO_TCP, 0));
f2e0899f 3445
057cf65e
YM
3446 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3447 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3448 } else {
f2e0899f 3449 pbd->tcp_pseudo_csum =
86564c3f
YM
3450 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3451 &ipv6_hdr(skb)->daddr,
3452 0, IPPROTO_TCP, 0));
057cf65e 3453 }
f2e0899f 3454
86564c3f
YM
3455 pbd->global_data |=
3456 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3457}
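/* For reference (a reading of the code above, not an authoritative FW spec):
 * csum_tcpudp_magic()/csum_ipv6_magic() are called with a zero length field,
 * so the PBD carries a pseudo-header checksum *without* the length, matching
 * the ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN flag set in global_data; the
 * FW can then factor in the per-segment length itself when it builds each
 * TSO segment.
 */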
f85582f8 3458
a848ade4
DK
3459/**
3460 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3461 *
3462 * @bp: driver handle
3463 * @skb: packet skb
3464 * @parsing_data: data to be updated
3465 * @xmit_type: xmit flags
3466 *
3467 * 57712/578xx related, when skb has encapsulation
3468 */
3469static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3470 u32 *parsing_data, u32 xmit_type)
3471{
3472 *parsing_data |=
3473 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3474 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3475 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3476
3477 if (xmit_type & XMIT_CSUM_TCP) {
3478 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3479 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3480 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3481
3482 return skb_inner_transport_header(skb) +
3483 inner_tcp_hdrlen(skb) - skb->data;
3484 }
3485
3486 /* We support checksum offload for TCP and UDP only.
3487 * No need to pass the UDP header length - it's a constant.
3488 */
3489 return skb_inner_transport_header(skb) +
3490 sizeof(struct udphdr) - skb->data;
3491}
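/* Note (derived from the code above): the offsets written into the parsing BD
 * are in 16-bit words (hence the ">> 1" and the _W suffix), while the value
 * returned to the caller is the full inner-header length in bytes, which
 * bnx2x_start_xmit() later uses as 'hlen' when deciding whether to split the
 * header BD for LSO.
 */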
3492
f2e0899f 3493/**
e8920674 3494 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3495 *
e8920674
DK
3496 * @bp: driver handle
3497 * @skb: packet skb
3498 * @parsing_data: data to be updated
3499 * @xmit_type: xmit flags
f2e0899f 3500 *
91226790 3501 * 57712/578xx related
f2e0899f 3502 */
91226790
DK
3503static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3504 u32 *parsing_data, u32 xmit_type)
f2e0899f 3505{
e39aece7 3506 *parsing_data |=
2de67439 3507 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3508 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3509 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3510
e39aece7
VZ
3511 if (xmit_type & XMIT_CSUM_TCP) {
3512 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3513 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3514 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3515
e39aece7 3516 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3517 }
3518 /* We support checksum offload for TCP and UDP only.
3519 * No need to pass the UDP header length - it's a constant.
3520 */
3521 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3522}
3523
a848ade4 3524/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3525static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3526 struct eth_tx_start_bd *tx_start_bd,
3527 u32 xmit_type)
93ef5c02 3528{
93ef5c02
DK
3529 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3530
a848ade4 3531 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3532 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3533
3534 if (!(xmit_type & XMIT_CSUM_TCP))
3535 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3536}
3537
f2e0899f 3538/**
e8920674 3539 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3540 *
e8920674
DK
3541 * @bp: driver handle
3542 * @skb: packet skb
3543 * @pbd: parse BD to be updated
3544 * @xmit_type: xmit flags
f2e0899f 3545 */
91226790
DK
3546static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3547 struct eth_tx_parse_bd_e1x *pbd,
3548 u32 xmit_type)
f2e0899f 3549{
e39aece7 3550 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3551
3552 /* for now NS flag is not used in Linux */
3553 pbd->global_data =
86564c3f
YM
3554 cpu_to_le16(hlen |
3555 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3556 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3557
3558 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3559 skb_network_header(skb)) >> 1;
f2e0899f 3560
e39aece7
VZ
3561 hlen += pbd->ip_hlen_w;
3562
3563 /* We support checksum offload for TCP and UDP only */
3564 if (xmit_type & XMIT_CSUM_TCP)
3565 hlen += tcp_hdrlen(skb) / 2;
3566 else
3567 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3568
3569 pbd->total_hlen_w = cpu_to_le16(hlen);
3570 hlen = hlen*2;
3571
3572 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3573 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3574
3575 } else {
3576 s8 fix = SKB_CS_OFF(skb); /* signed! */
3577
3578 DP(NETIF_MSG_TX_QUEUED,
3579 "hlen %d fix %d csum before fix %x\n",
3580 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3581
3582 /* HW bug: fixup the CSUM */
3583 pbd->tcp_pseudo_csum =
3584 bnx2x_csum_fix(skb_transport_header(skb),
3585 SKB_CS(skb), fix);
3586
3587 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3588 pbd->tcp_pseudo_csum);
3589 }
3590
3591 return hlen;
3592}
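/* A worked example for the E1x path above (illustrative): for an untagged
 * IPv4/TCP frame with a 14-byte Ethernet header, a 20-byte IP header and a
 * 20-byte TCP header, hlen starts at 14 / 2 = 7 words, ip_hlen_w is 10
 * words, and the TCP header adds another 10 words, so total_hlen_w is 27
 * and the function returns 54 bytes to the caller.
 */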
f85582f8 3593
a848ade4
DK
3594static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3595 struct eth_tx_parse_bd_e2 *pbd_e2,
3596 struct eth_tx_parse_2nd_bd *pbd2,
3597 u16 *global_data,
3598 u32 xmit_type)
3599{
e287a75c 3600 u16 hlen_w = 0;
a848ade4 3601 u8 outerip_off, outerip_len = 0;
e768fb29 3602
e287a75c
DK
3603 /* from outer IP to transport */
3604 hlen_w = (skb_inner_transport_header(skb) -
3605 skb_network_header(skb)) >> 1;
a848ade4
DK
3606
3607 /* transport len */
e768fb29 3608 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3609
e287a75c 3610 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3611
e768fb29
DK
3612 /* outer IP header info */
3613 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3614 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3615 u32 csum = (__force u32)(~iph->check) -
3616 (__force u32)iph->tot_len -
3617 (__force u32)iph->frag_off;
c957d09f 3618
a848ade4 3619 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3620 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3621 } else {
3622 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3623 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3624 }
3625
3626 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3627
3628 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3629
3630 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3631 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3632
3633 pbd_e2->data.tunnel_data.pseudo_csum =
3634 bswab16(~csum_tcpudp_magic(
3635 inner_ip_hdr(skb)->saddr,
3636 inner_ip_hdr(skb)->daddr,
3637 0, IPPROTO_TCP, 0));
3638
3639 outerip_len = ip_hdr(skb)->ihl << 1;
3640 } else {
3641 pbd_e2->data.tunnel_data.pseudo_csum =
3642 bswab16(~csum_ipv6_magic(
3643 &inner_ipv6_hdr(skb)->saddr,
3644 &inner_ipv6_hdr(skb)->daddr,
3645 0, IPPROTO_TCP, 0));
3646 }
3647
3648 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3649
3650 *global_data |=
3651 outerip_off |
3652 (!!(xmit_type & XMIT_CSUM_V6) <<
3653 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3654 (outerip_len <<
3655 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3656 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3657 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3658
3659 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3660 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3661 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3662 }
a848ade4
DK
3663}
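/* Note on the tunnel GSO parsing above (a reading of the code, not a FW
 * spec): all header offsets/lengths placed in the 2nd parse BD are in
 * 16-bit words, and fw_ip_csum_wo_len_flags_frag appears to hand the FW the
 * outer IPv4 header checksum with tot_len and frag_off backed out (as the
 * field name suggests), presumably so the FW can refold the checksum as it
 * rewrites those fields for every generated segment.
 */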
3664
9f6c9258
DK
3665/* called with netif_tx_lock
3666 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3667 * netif_wake_queue()
3668 */
3669netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3670{
3671 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3672
9f6c9258 3673 struct netdev_queue *txq;
6383c0b3 3674 struct bnx2x_fp_txdata *txdata;
9f6c9258 3675 struct sw_tx_bd *tx_buf;
619c5cb6 3676 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3677 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3678 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3679 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3680 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3681 u32 pbd_e2_parsing_data = 0;
9f6c9258 3682 u16 pkt_prod, bd_prod;
65565884 3683 int nbd, txq_index;
9f6c9258
DK
3684 dma_addr_t mapping;
3685 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3686 int i;
3687 u8 hlen = 0;
3688 __le16 pkt_size = 0;
3689 struct ethhdr *eth;
3690 u8 mac_type = UNICAST_ADDRESS;
3691
3692#ifdef BNX2X_STOP_ON_ERROR
3693 if (unlikely(bp->panic))
3694 return NETDEV_TX_BUSY;
3695#endif
3696
6383c0b3
AE
3697 txq_index = skb_get_queue_mapping(skb);
3698 txq = netdev_get_tx_queue(dev, txq_index);
3699
55c11941 3700 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3701
65565884 3702 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3703
3704 /* enable this debug print to view the transmission queue being used
51c1a580 3705 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3706 txq_index, fp_index, txdata_index); */
9f6c9258 3707
16a5fd92 3708 /* enable this debug print to view the transmission details
51c1a580
MS
3709 DP(NETIF_MSG_TX_QUEUED,
3710 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3711 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3712
6383c0b3 3713 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3714 skb_shinfo(skb)->nr_frags +
3715 BDS_PER_TX_PKT +
3716 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3717 /* Handle special storage cases separately */
c96bdc0c
DK
3718 if (txdata->tx_ring_size == 0) {
3719 struct bnx2x_eth_q_stats *q_stats =
3720 bnx2x_fp_qstats(bp, txdata->parent_fp);
3721 q_stats->driver_filtered_tx_pkt++;
3722 dev_kfree_skb(skb);
3723 return NETDEV_TX_OK;
3724 }
2de67439
YM
3725 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3726 netif_tx_stop_queue(txq);
c96bdc0c 3727 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3728
9f6c9258
DK
3729 return NETDEV_TX_BUSY;
3730 }
3731
51c1a580 3732 DP(NETIF_MSG_TX_QUEUED,
04c46736 3733 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3734 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3735 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3736 skb->len);
9f6c9258
DK
3737
3738 eth = (struct ethhdr *)skb->data;
3739
3740 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3741 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3742 if (is_broadcast_ether_addr(eth->h_dest))
3743 mac_type = BROADCAST_ADDRESS;
3744 else
3745 mac_type = MULTICAST_ADDRESS;
3746 }
3747
91226790 3748#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3749 /* First, check if we need to linearize the skb (due to FW
3750 restrictions). No need to check fragmentation if page size > 8K
3751 (there will be no violation of FW restrictions) */
3752 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3753 /* Statistics of linearization */
3754 bp->lin_cnt++;
3755 if (skb_linearize(skb) != 0) {
51c1a580
MS
3756 DP(NETIF_MSG_TX_QUEUED,
3757 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3758 dev_kfree_skb_any(skb);
3759 return NETDEV_TX_OK;
3760 }
3761 }
3762#endif
619c5cb6
VZ
3763 /* Map skb linear data for DMA */
3764 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3765 skb_headlen(skb), DMA_TO_DEVICE);
3766 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3767 DP(NETIF_MSG_TX_QUEUED,
3768 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3769 dev_kfree_skb_any(skb);
3770 return NETDEV_TX_OK;
3771 }
9f6c9258
DK
3772 /*
3773 Please read carefully. First we use one BD which we mark as start,
3774 then we have a parsing info BD (used for TSO or xsum),
3775 and only then we have the rest of the TSO BDs.
3776 (don't forget to mark the last one as last,
3777 and to unmap only AFTER you write to the BD ...)
3778 And above all, all PBD sizes are in words - NOT DWORDS!
3779 */
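/* A sketch of the resulting BD chain (derived from the code below, not from
 * a HW spec): start BD -> parse BD (e1x on 57710/57711, e2 otherwise)
 * [-> 2nd parse BD, tunnelled packets only]
 * [-> extra data BD from the LSO header split, when skb_headlen() > hlen]
 * -> one regular BD per page fragment.
 */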
3780
619c5cb6
VZ
3781 /* get current pkt produced now - advance it just before sending packet
3782 * since mapping of pages may fail and cause packet to be dropped
3783 */
6383c0b3
AE
3784 pkt_prod = txdata->tx_pkt_prod;
3785 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3786
619c5cb6
VZ
3787 /* get a tx_buf and first BD
3788 * tx_start_bd may be changed during SPLIT,
3789 * but first_bd will always stay first
3790 */
6383c0b3
AE
3791 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3792 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3793 first_bd = tx_start_bd;
9f6c9258
DK
3794
3795 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3796
91226790
DK
3797 /* header nbd: indirectly zero other flags! */
3798 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3799
3800 /* remember the first BD of the packet */
6383c0b3 3801 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3802 tx_buf->skb = skb;
3803 tx_buf->flags = 0;
3804
3805 DP(NETIF_MSG_TX_QUEUED,
3806 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3807 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3808
eab6d18d 3809 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3810 tx_start_bd->vlan_or_ethertype =
3811 cpu_to_le16(vlan_tx_tag_get(skb));
3812 tx_start_bd->bd_flags.as_bitfield |=
3813 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3814 } else {
3815 /* when transmitting in a vf, start bd must hold the ethertype
3816 * for fw to enforce it
3817 */
91226790 3818 if (IS_VF(bp))
dc1ba591
AE
3819 tx_start_bd->vlan_or_ethertype =
3820 cpu_to_le16(ntohs(eth->h_proto));
91226790 3821 else
dc1ba591
AE
3822 /* used by FW for packet accounting */
3823 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3824 }
9f6c9258 3825
91226790
DK
3826 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3827
9f6c9258
DK
3828 /* turn on parsing and get a BD */
3829 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3830
93ef5c02
DK
3831 if (xmit_type & XMIT_CSUM)
3832 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3833
619c5cb6 3834 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3835 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3836 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3837
3838 if (xmit_type & XMIT_CSUM_ENC) {
3839 u16 global_data = 0;
3840
3841 /* Set PBD in enc checksum offload case */
3842 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3843 &pbd_e2_parsing_data,
3844 xmit_type);
3845
3846 /* turn on 2nd parsing and get a BD */
3847 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3848
3849 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3850
3851 memset(pbd2, 0, sizeof(*pbd2));
3852
3853 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3854 (skb_inner_network_header(skb) -
3855 skb->data) >> 1;
3856
3857 if (xmit_type & XMIT_GSO_ENC)
3858 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3859 &global_data,
3860 xmit_type);
3861
3862 pbd2->global_data = cpu_to_le16(global_data);
3863
3864 /* add an additional parse BD indication to the start BD */
3865 SET_FLAG(tx_start_bd->general_data,
3866 ETH_TX_START_BD_PARSE_NBDS, 1);
3867 /* set encapsulation flag in start BD */
3868 SET_FLAG(tx_start_bd->general_data,
3869 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3870 nbd++;
3871 } else if (xmit_type & XMIT_CSUM) {
91226790 3872 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3873 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3874 &pbd_e2_parsing_data,
3875 xmit_type);
a848ade4 3876 }
dc1ba591 3877
91226790
DK
3878 /* Add the MACs to the parsing BD; this is a VF */
3879 if (IS_VF(bp)) {
3880 /* override GRE parameters in BD */
3881 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3882 &pbd_e2->data.mac_addr.src_mid,
3883 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3884 eth->h_source);
91226790
DK
3885
3886 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3887 &pbd_e2->data.mac_addr.dst_mid,
3888 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3889 eth->h_dest);
3890 }
96bed4b9
YM
3891
3892 SET_FLAG(pbd_e2_parsing_data,
3893 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3894 } else {
96bed4b9 3895 u16 global_data = 0;
6383c0b3 3896 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3897 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3898 /* Set PBD in checksum offload case */
3899 if (xmit_type & XMIT_CSUM)
3900 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3901
96bed4b9
YM
3902 SET_FLAG(global_data,
3903 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3904 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3905 }
3906
f85582f8 3907 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3908 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3909 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3910 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3911 pkt_size = tx_start_bd->nbytes;
3912
51c1a580 3913 DP(NETIF_MSG_TX_QUEUED,
91226790 3914 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3915 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3916 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3917 tx_start_bd->bd_flags.as_bitfield,
3918 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3919
3920 if (xmit_type & XMIT_GSO) {
3921
3922 DP(NETIF_MSG_TX_QUEUED,
3923 "TSO packet len %d hlen %d total len %d tso size %d\n",
3924 skb->len, hlen, skb_headlen(skb),
3925 skb_shinfo(skb)->gso_size);
3926
3927 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3928
91226790
DK
3929 if (unlikely(skb_headlen(skb) > hlen)) {
3930 nbd++;
6383c0b3
AE
3931 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3932 &tx_start_bd, hlen,
91226790
DK
3933 bd_prod);
3934 }
619c5cb6 3935 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3936 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3937 xmit_type);
f2e0899f 3938 else
44dbc78e 3939 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3940 }
2297a2da
VZ
3941
3942 /* Set the PBD's parsing_data field if not zero
3943 * (for the chips newer than 57711).
3944 */
3945 if (pbd_e2_parsing_data)
3946 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3947
9f6c9258
DK
3948 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3949
f85582f8 3950 /* Handle fragmented skb */
9f6c9258
DK
3951 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3952 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3953
9e903e08
ED
3954 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3955 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3956 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3957 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3958
51c1a580
MS
3959 DP(NETIF_MSG_TX_QUEUED,
3960 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3961
3962 /* we need unmap all buffers already mapped
3963 * for this SKB;
3964 * first_bd->nbd need to be properly updated
3965 * before call to bnx2x_free_tx_pkt
3966 */
3967 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3968 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3969 TX_BD(txdata->tx_pkt_prod),
3970 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3971 return NETDEV_TX_OK;
3972 }
3973
9f6c9258 3974 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3975 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3976 if (total_pkt_bd == NULL)
6383c0b3 3977 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3978
9f6c9258
DK
3979 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3980 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3981 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3982 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3983 nbd++;
9f6c9258
DK
3984
3985 DP(NETIF_MSG_TX_QUEUED,
3986 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3987 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3988 le16_to_cpu(tx_data_bd->nbytes));
3989 }
3990
3991 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3992
619c5cb6
VZ
3993 /* update with actual num BDs */
3994 first_bd->nbd = cpu_to_le16(nbd);
3995
9f6c9258
DK
3996 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3997
3998 /* now send a tx doorbell, counting the next BD
3999 * if the packet contains or ends with it
4000 */
4001 if (TX_BD_POFF(bd_prod) < nbd)
4002 nbd++;
4003
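 /* (Note, from the ring layout implied by NEXT_TX_IDX/TX_BD_POFF: the last
 * BD of every ring page is a next-page pointer rather than a packet BD, so
 * when the packet wraps past a page boundary that pointer BD is counted in
 * nbd as well.)
 */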
619c5cb6
VZ
4004 /* total_pkt_bytes should be set on the first data BD if
4005 * it's not an LSO packet and there is more than one
4006 * data BD. In this case pkt_size is limited by an MTU value.
4007 * However, we prefer to set it for an LSO packet as well (although we
4008 * don't have to) in order to save some CPU cycles in the non-LSO
4009 * case, where we care much more about them.
4010 */
9f6c9258
DK
4011 if (total_pkt_bd != NULL)
4012 total_pkt_bd->total_pkt_bytes = pkt_size;
4013
523224a3 4014 if (pbd_e1x)
9f6c9258 4015 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4016 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4017 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4018 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4019 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4020 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4021 if (pbd_e2)
4022 DP(NETIF_MSG_TX_QUEUED,
4023 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4024 pbd_e2,
4025 pbd_e2->data.mac_addr.dst_hi,
4026 pbd_e2->data.mac_addr.dst_mid,
4027 pbd_e2->data.mac_addr.dst_lo,
4028 pbd_e2->data.mac_addr.src_hi,
4029 pbd_e2->data.mac_addr.src_mid,
4030 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4031 pbd_e2->parsing_data);
9f6c9258
DK
4032 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4033
2df1a70a
TH
4034 netdev_tx_sent_queue(txq, skb->len);
4035
8373c57d
WB
4036 skb_tx_timestamp(skb);
4037
6383c0b3 4038 txdata->tx_pkt_prod++;
9f6c9258
DK
4039 /*
4040 * Make sure that the BD data is updated before updating the producer
4041 * since FW might read the BD right after the producer is updated.
4042 * This is only applicable for weak-ordered memory model archs such
4043 * as IA-64. The following barrier is also mandatory since FW
4044 * assumes packets must have BDs.
4045 */
4046 wmb();
4047
6383c0b3 4048 txdata->tx_db.data.prod += nbd;
9f6c9258 4049 barrier();
f85582f8 4050
6383c0b3 4051 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4052
4053 mmiowb();
4054
6383c0b3 4055 txdata->tx_bd_prod += nbd;
9f6c9258 4056
7df2dc6b 4057 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4058 netif_tx_stop_queue(txq);
4059
4060 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4061 * ordering of set_bit() in netif_tx_stop_queue() and read of
4062 * fp->bd_tx_cons */
4063 smp_mb();
4064
15192a8c 4065 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4066 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4067 netif_tx_wake_queue(txq);
4068 }
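 /* The stop/wake pattern above follows the usual scheme: stop first,
 * then a full barrier paired with bnx2x_tx_int(), then re-check the
 * ring occupancy and wake if completions freed enough BDs in between,
 * so the queue cannot be left stopped with a (nearly) empty ring.
 */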
6383c0b3 4069 txdata->tx_pkt++;
9f6c9258
DK
4070
4071 return NETDEV_TX_OK;
4072}
f85582f8 4073
6383c0b3
AE
4074/**
4075 * bnx2x_setup_tc - routine to configure net_device for multi tc
4076 *
4077 * @netdev: net device to configure
4078 * @tc: number of traffic classes to enable
4079 *
4080 * callback connected to the ndo_setup_tc function pointer
4081 */
4082int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4083{
4084 int cos, prio, count, offset;
4085 struct bnx2x *bp = netdev_priv(dev);
4086
4087 /* setup tc must be called under rtnl lock */
4088 ASSERT_RTNL();
4089
16a5fd92 4090 /* no traffic classes requested. Aborting */
6383c0b3
AE
4091 if (!num_tc) {
4092 netdev_reset_tc(dev);
4093 return 0;
4094 }
4095
4096 /* requested to support too many traffic classes */
4097 if (num_tc > bp->max_cos) {
6bf07b8e 4098 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4099 num_tc, bp->max_cos);
6383c0b3
AE
4100 return -EINVAL;
4101 }
4102
4103 /* declare the number of supported traffic classes */
4104 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4105 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4106 return -EINVAL;
4107 }
4108
4109 /* configure priority to traffic class mapping */
4110 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4111 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4112 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4113 "mapping priority %d to tc %d\n",
6383c0b3
AE
4114 prio, bp->prio_to_cos[prio]);
4115 }
4116
16a5fd92 4117 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4118 This can be used for ETS or PFC, and saves the effort of setting
4119 up a multi-class queue disc or negotiating DCBX with a switch
4120 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4121 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4122 for (prio = 1; prio < 16; prio++) {
4123 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4124 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4125 } */
4126
4127 /* configure traffic class to transmission queue mapping */
4128 for (cos = 0; cos < bp->max_cos; cos++) {
4129 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4130 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4131 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4132 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4133 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4134 cos, offset, count);
4135 }
4136
4137 return 0;
4138}
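/* For illustration (values are an example, not a fixed configuration): with
 * 4 ETH RSS queues, no CNIC queue and max_cos == 3, the loop above maps
 * tc0 -> queues 0-3, tc1 -> queues 4-7 and tc2 -> queues 8-11, i.e. each
 * traffic class gets its own contiguous block of transmission queues.
 */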
4139
9f6c9258
DK
4140/* called with rtnl_lock */
4141int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4142{
4143 struct sockaddr *addr = p;
4144 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4145 int rc = 0;
9f6c9258 4146
51c1a580
MS
4147 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4148 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4149 return -EINVAL;
51c1a580 4150 }
614c76df 4151
a3348722
BW
4152 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4153 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4154 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4155 return -EINVAL;
51c1a580 4156 }
9f6c9258 4157
619c5cb6
VZ
4158 if (netif_running(dev)) {
4159 rc = bnx2x_set_eth_mac(bp, false);
4160 if (rc)
4161 return rc;
4162 }
4163
9f6c9258 4164 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4165
523224a3 4166 if (netif_running(dev))
619c5cb6 4167 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4168
619c5cb6 4169 return rc;
9f6c9258
DK
4170}
4171
b3b83c3f
DK
4172static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4173{
4174 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4175 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4176 u8 cos;
b3b83c3f
DK
4177
4178 /* Common */
55c11941 4179
b3b83c3f
DK
4180 if (IS_FCOE_IDX(fp_index)) {
4181 memset(sb, 0, sizeof(union host_hc_status_block));
4182 fp->status_blk_mapping = 0;
b3b83c3f 4183 } else {
b3b83c3f 4184 /* status blocks */
619c5cb6 4185 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4186 BNX2X_PCI_FREE(sb->e2_sb,
4187 bnx2x_fp(bp, fp_index,
4188 status_blk_mapping),
4189 sizeof(struct host_hc_status_block_e2));
4190 else
4191 BNX2X_PCI_FREE(sb->e1x_sb,
4192 bnx2x_fp(bp, fp_index,
4193 status_blk_mapping),
4194 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4195 }
55c11941 4196
b3b83c3f
DK
4197 /* Rx */
4198 if (!skip_rx_queue(bp, fp_index)) {
4199 bnx2x_free_rx_bds(fp);
4200
4201 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4202 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4203 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4204 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4205 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4206
4207 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4208 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4209 sizeof(struct eth_fast_path_rx_cqe) *
4210 NUM_RCQ_BD);
4211
4212 /* SGE ring */
4213 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4214 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4215 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4216 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4217 }
4218
4219 /* Tx */
4220 if (!skip_tx_queue(bp, fp_index)) {
4221 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4222 for_each_cos_in_tx_queue(fp, cos) {
65565884 4223 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4224
51c1a580 4225 DP(NETIF_MSG_IFDOWN,
94f05b0f 4226 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4227 fp_index, cos, txdata->cid);
4228
4229 BNX2X_FREE(txdata->tx_buf_ring);
4230 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4231 txdata->tx_desc_mapping,
4232 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4233 }
b3b83c3f
DK
4234 }
4235 /* end of fastpath */
4236}
4237
a8f47eb7 4238static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4239{
4240 int i;
4241 for_each_cnic_queue(bp, i)
4242 bnx2x_free_fp_mem_at(bp, i);
4243}
4244
b3b83c3f
DK
4245void bnx2x_free_fp_mem(struct bnx2x *bp)
4246{
4247 int i;
55c11941 4248 for_each_eth_queue(bp, i)
b3b83c3f
DK
4249 bnx2x_free_fp_mem_at(bp, i);
4250}
4251
1191cb83 4252static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4253{
4254 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4255 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4256 bnx2x_fp(bp, index, sb_index_values) =
4257 (__le16 *)status_blk.e2_sb->sb.index_values;
4258 bnx2x_fp(bp, index, sb_running_index) =
4259 (__le16 *)status_blk.e2_sb->sb.running_index;
4260 } else {
4261 bnx2x_fp(bp, index, sb_index_values) =
4262 (__le16 *)status_blk.e1x_sb->sb.index_values;
4263 bnx2x_fp(bp, index, sb_running_index) =
4264 (__le16 *)status_blk.e1x_sb->sb.running_index;
4265 }
4266}
4267
1191cb83
ED
4268/* Returns the number of actually allocated BDs */
4269static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4270 int rx_ring_size)
4271{
4272 struct bnx2x *bp = fp->bp;
4273 u16 ring_prod, cqe_ring_prod;
4274 int i, failure_cnt = 0;
4275
4276 fp->rx_comp_cons = 0;
4277 cqe_ring_prod = ring_prod = 0;
4278
4279 /* This routine is called only during full init, so
4280 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4281 */
4282 for (i = 0; i < rx_ring_size; i++) {
996dedba 4283 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4284 failure_cnt++;
4285 continue;
4286 }
4287 ring_prod = NEXT_RX_IDX(ring_prod);
4288 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4289 WARN_ON(ring_prod <= (i - failure_cnt));
4290 }
4291
4292 if (failure_cnt)
4293 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4294 i - failure_cnt, fp->index);
4295
4296 fp->rx_bd_prod = ring_prod;
4297 /* Limit the CQE producer by the CQE ring size */
4298 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4299 cqe_ring_prod);
4300 fp->rx_pkt = fp->rx_calls = 0;
4301
15192a8c 4302 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4303
4304 return i - failure_cnt;
4305}
4306
4307static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4308{
4309 int i;
4310
4311 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4312 struct eth_rx_cqe_next_page *nextpg;
4313
4314 nextpg = (struct eth_rx_cqe_next_page *)
4315 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4316 nextpg->addr_hi =
4317 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4318 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4319 nextpg->addr_lo =
4320 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4321 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4322 }
4323}
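/* Note (from the loop above): the last CQE of each RCQ page is used as a
 * next-page pointer, and the "(i % NUM_RCQ_RINGS)" term makes the final
 * page point back at the first, so the completion queue forms a ring of
 * chained pages.
 */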
4324
b3b83c3f
DK
4325static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4326{
4327 union host_hc_status_block *sb;
4328 struct bnx2x_fastpath *fp = &bp->fp[index];
4329 int ring_size = 0;
6383c0b3 4330 u8 cos;
c2188952 4331 int rx_ring_size = 0;
b3b83c3f 4332
a3348722
BW
4333 if (!bp->rx_ring_size &&
4334 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4335 rx_ring_size = MIN_RX_SIZE_NONTPA;
4336 bp->rx_ring_size = rx_ring_size;
55c11941 4337 } else if (!bp->rx_ring_size) {
c2188952
VZ
4338 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4339
065f8b92
YM
4340 if (CHIP_IS_E3(bp)) {
4341 u32 cfg = SHMEM_RD(bp,
4342 dev_info.port_hw_config[BP_PORT(bp)].
4343 default_cfg);
4344
4345 /* Decrease ring size for 1G functions */
4346 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4347 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4348 rx_ring_size /= 10;
4349 }
d760fc37 4350
c2188952
VZ
4351 /* allocate at least number of buffers required by FW */
4352 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4353 MIN_RX_SIZE_TPA, rx_ring_size);
4354
4355 bp->rx_ring_size = rx_ring_size;
614c76df 4356 } else /* if rx_ring_size specified - use it */
c2188952 4357 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4358
04c46736
YM
4359 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4360
b3b83c3f
DK
4361 /* Common */
4362 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4363
b3b83c3f 4364 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4365 /* status blocks */
619c5cb6 4366 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4367 BNX2X_PCI_ALLOC(sb->e2_sb,
4368 &bnx2x_fp(bp, index, status_blk_mapping),
4369 sizeof(struct host_hc_status_block_e2));
4370 else
4371 BNX2X_PCI_ALLOC(sb->e1x_sb,
4372 &bnx2x_fp(bp, index, status_blk_mapping),
4373 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4374 }
8eef2af1
DK
4375
4376 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4377 * set shortcuts for it.
4378 */
4379 if (!IS_FCOE_IDX(index))
4380 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4381
4382 /* Tx */
4383 if (!skip_tx_queue(bp, index)) {
4384 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4385 for_each_cos_in_tx_queue(fp, cos) {
65565884 4386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4387
51c1a580
MS
4388 DP(NETIF_MSG_IFUP,
4389 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4390 index, cos);
4391
4392 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4393 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4394 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4395 &txdata->tx_desc_mapping,
b3b83c3f 4396 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4397 }
b3b83c3f
DK
4398 }
4399
4400 /* Rx */
4401 if (!skip_rx_queue(bp, index)) {
4402 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4403 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4404 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4405 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4406 &bnx2x_fp(bp, index, rx_desc_mapping),
4407 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4408
75b29459
DK
4409 /* Seed all CQEs by 1s */
4410 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4411 &bnx2x_fp(bp, index, rx_comp_mapping),
4412 sizeof(struct eth_fast_path_rx_cqe) *
4413 NUM_RCQ_BD);
b3b83c3f
DK
4414
4415 /* SGE ring */
4416 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4417 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4418 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4419 &bnx2x_fp(bp, index, rx_sge_mapping),
4420 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4421 /* RX BD ring */
4422 bnx2x_set_next_page_rx_bd(fp);
4423
4424 /* CQ ring */
4425 bnx2x_set_next_page_rx_cq(fp);
4426
4427 /* BDs */
4428 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4429 if (ring_size < rx_ring_size)
4430 goto alloc_mem_err;
4431 }
4432
4433 return 0;
4434
4435/* handles low memory cases */
4436alloc_mem_err:
4437 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4438 index, ring_size);
4439 /* FW will drop all packets if the queue is not big enough.
4440 * In these cases we disable the queue.
6383c0b3 4441 * Min size is different for OOO, TPA and non-TPA queues.
b3b83c3f
DK
4442 */
4443 if (ring_size < (fp->disable_tpa ?
eb722d7a 4444 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4445 /* release memory allocated for this queue */
4446 bnx2x_free_fp_mem_at(bp, index);
4447 return -ENOMEM;
4448 }
4449 return 0;
4450}
4451
a8f47eb7 4452static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4453{
4454 if (!NO_FCOE(bp))
4455 /* FCoE */
4456 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4457 /* we will fail the load process instead of marking
4458 * NO_FCOE_FLAG
4459 */
4460 return -ENOMEM;
4461
4462 return 0;
4463}
4464
a8f47eb7 4465static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4466{
4467 int i;
4468
55c11941
MS
4469 /* 1. Allocate FP for leading - fatal if error
4470 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4471 */
4472
4473 /* leading */
4474 if (bnx2x_alloc_fp_mem_at(bp, 0))
4475 return -ENOMEM;
6383c0b3 4476
b3b83c3f
DK
4477 /* RSS */
4478 for_each_nondefault_eth_queue(bp, i)
4479 if (bnx2x_alloc_fp_mem_at(bp, i))
4480 break;
4481
4482 /* handle memory failures */
4483 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4484 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4485
4486 WARN_ON(delta < 0);
4864a16a 4487 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4488 if (CNIC_SUPPORT(bp))
4489 /* move non-eth FPs next to the last eth FP;
4490 * must be done in that order:
4491 * FCOE_IDX < FWD_IDX < OOO_IDX
4492 */
b3b83c3f 4493
55c11941
MS
4494 /* move FCoE fp even NO_FCOE_FLAG is on */
4495 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4496 bp->num_ethernet_queues -= delta;
4497 bp->num_queues = bp->num_ethernet_queues +
4498 bp->num_cnic_queues;
b3b83c3f
DK
4499 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4500 bp->num_queues + delta, bp->num_queues);
4501 }
4502
4503 return 0;
4504}
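/* An illustrative failure case for the fallback above (example numbers): if
 * 8 ETH queues were requested and allocation first fails at i == 6, delta is
 * 2, the ETH fastpaths are shrunk by two, the FCoE fastpath (when CNIC is
 * supported) is moved down by delta so it stays adjacent to the last ETH
 * queue, and num_queues is reduced accordingly.
 */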
d6214d7a 4505
523224a3
DK
4506void bnx2x_free_mem_bp(struct bnx2x *bp)
4507{
c3146eb6
DK
4508 int i;
4509
4510 for (i = 0; i < bp->fp_array_size; i++)
4511 kfree(bp->fp[i].tpa_info);
523224a3 4512 kfree(bp->fp);
15192a8c
BW
4513 kfree(bp->sp_objs);
4514 kfree(bp->fp_stats);
65565884 4515 kfree(bp->bnx2x_txq);
523224a3
DK
4516 kfree(bp->msix_table);
4517 kfree(bp->ilt);
4518}
4519
0329aba1 4520int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4521{
4522 struct bnx2x_fastpath *fp;
4523 struct msix_entry *tbl;
4524 struct bnx2x_ilt *ilt;
6383c0b3 4525 int msix_table_size = 0;
55c11941 4526 int fp_array_size, txq_array_size;
15192a8c 4527 int i;
6383c0b3
AE
4528
4529 /*
4530 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4531 * path IGU SBs plus default SB (for PF only).
6383c0b3 4532 */
1ab4434c
AE
4533 msix_table_size = bp->igu_sb_cnt;
4534 if (IS_PF(bp))
4535 msix_table_size++;
4536 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4537
6383c0b3 4538 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4539 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4540 bp->fp_array_size = fp_array_size;
4541 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4542
c3146eb6 4543 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4544 if (!fp)
4545 goto alloc_err;
c3146eb6 4546 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4547 fp[i].tpa_info =
4548 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4549 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4550 if (!(fp[i].tpa_info))
4551 goto alloc_err;
4552 }
4553
523224a3
DK
4554 bp->fp = fp;
4555
15192a8c 4556 /* allocate sp objs */
c3146eb6 4557 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4558 GFP_KERNEL);
4559 if (!bp->sp_objs)
4560 goto alloc_err;
4561
4562 /* allocate fp_stats */
c3146eb6 4563 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4564 GFP_KERNEL);
4565 if (!bp->fp_stats)
4566 goto alloc_err;
4567
65565884 4568 /* Allocate memory for the transmission queues array */
55c11941
MS
4569 txq_array_size =
4570 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4571 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4572
4573 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4574 GFP_KERNEL);
65565884
MS
4575 if (!bp->bnx2x_txq)
4576 goto alloc_err;
4577
523224a3 4578 /* msix table */
01e23742 4579 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4580 if (!tbl)
4581 goto alloc_err;
4582 bp->msix_table = tbl;
4583
4584 /* ilt */
4585 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4586 if (!ilt)
4587 goto alloc_err;
4588 bp->ilt = ilt;
4589
4590 return 0;
4591alloc_err:
4592 bnx2x_free_mem_bp(bp);
4593 return -ENOMEM;
523224a3
DK
4594}
4595
a9fccec7 4596int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4597{
4598 struct bnx2x *bp = netdev_priv(dev);
4599
4600 if (unlikely(!netif_running(dev)))
4601 return 0;
4602
5d07d868 4603 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4604 return bnx2x_nic_load(bp, LOAD_NORMAL);
4605}
4606
1ac9e428
YR
4607int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4608{
4609 u32 sel_phy_idx = 0;
4610 if (bp->link_params.num_phys <= 1)
4611 return INT_PHY;
4612
4613 if (bp->link_vars.link_up) {
4614 sel_phy_idx = EXT_PHY1;
4615 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4616 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4617 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4618 sel_phy_idx = EXT_PHY2;
4619 } else {
4620
4621 switch (bnx2x_phy_selection(&bp->link_params)) {
4622 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4623 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4624 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4625 sel_phy_idx = EXT_PHY1;
4626 break;
4627 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4628 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4629 sel_phy_idx = EXT_PHY2;
4630 break;
4631 }
4632 }
4633
4634 return sel_phy_idx;
1ac9e428
YR
4635}
4636int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4637{
4638 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4639 /*
2de67439 4640 * The selected activated PHY is always after swapping (in case PHY
1ac9e428
YR
4641 * swapping is enabled). So when swapping is enabled, we need to reverse
4642 * the configuration
4643 */
4644
4645 if (bp->link_params.multi_phy_config &
4646 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4647 if (sel_phy_idx == EXT_PHY1)
4648 sel_phy_idx = EXT_PHY2;
4649 else if (sel_phy_idx == EXT_PHY2)
4650 sel_phy_idx = EXT_PHY1;
4651 }
4652 return LINK_CONFIG_IDX(sel_phy_idx);
4653}
4654
55c11941 4655#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4656int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4657{
4658 struct bnx2x *bp = netdev_priv(dev);
4659 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4660
4661 switch (type) {
4662 case NETDEV_FCOE_WWNN:
4663 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4664 cp->fcoe_wwn_node_name_lo);
4665 break;
4666 case NETDEV_FCOE_WWPN:
4667 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4668 cp->fcoe_wwn_port_name_lo);
4669 break;
4670 default:
51c1a580 4671 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4672 return -EINVAL;
4673 }
4674
4675 return 0;
4676}
4677#endif
4678
9f6c9258
DK
4679/* called with rtnl_lock */
4680int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4681{
4682 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4683
4684 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4685 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4686 return -EAGAIN;
4687 }
4688
4689 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4690 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4691 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4692 return -EINVAL;
51c1a580 4693 }
9f6c9258
DK
4694
4695 /* This does not race with packet allocation
4696 * because the actual alloc size is
4697 * only updated as part of load
4698 */
4699 dev->mtu = new_mtu;
4700
66371c44
MM
4701 return bnx2x_reload_if_running(dev);
4702}
4703
c8f44aff 4704netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4705 netdev_features_t features)
66371c44
MM
4706{
4707 struct bnx2x *bp = netdev_priv(dev);
4708
4709 /* TPA requires Rx CSUM offloading */
621b4d66 4710 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4711 features &= ~NETIF_F_LRO;
621b4d66
DK
4712 features &= ~NETIF_F_GRO;
4713 }
66371c44
MM
4714
4715 return features;
4716}
4717
c8f44aff 4718int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4719{
4720 struct bnx2x *bp = netdev_priv(dev);
4721 u32 flags = bp->flags;
8802f579 4722 u32 changes;
538dd2e3 4723 bool bnx2x_reload = false;
66371c44
MM
4724
4725 if (features & NETIF_F_LRO)
4726 flags |= TPA_ENABLE_FLAG;
4727 else
4728 flags &= ~TPA_ENABLE_FLAG;
4729
621b4d66
DK
4730 if (features & NETIF_F_GRO)
4731 flags |= GRO_ENABLE_FLAG;
4732 else
4733 flags &= ~GRO_ENABLE_FLAG;
4734
538dd2e3
MB
4735 if (features & NETIF_F_LOOPBACK) {
4736 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4737 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4738 bnx2x_reload = true;
4739 }
4740 } else {
4741 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4742 bp->link_params.loopback_mode = LOOPBACK_NONE;
4743 bnx2x_reload = true;
4744 }
4745 }
4746
8802f579
ED
4747 changes = flags ^ bp->flags;
4748
16a5fd92 4749 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4750 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4751 changes &= ~GRO_ENABLE_FLAG;
4752
4753 if (changes)
538dd2e3 4754 bnx2x_reload = true;
8802f579
ED
4755
4756 bp->flags = flags;
66371c44 4757
538dd2e3 4758 if (bnx2x_reload) {
66371c44
MM
4759 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4760 return bnx2x_reload_if_running(dev);
4761 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4762 }
4763
66371c44 4764 return 0;
9f6c9258
DK
4765}
4766
4767void bnx2x_tx_timeout(struct net_device *dev)
4768{
4769 struct bnx2x *bp = netdev_priv(dev);
4770
4771#ifdef BNX2X_STOP_ON_ERROR
4772 if (!bp->panic)
4773 bnx2x_panic();
4774#endif
7be08a72
AE
4775
4776 smp_mb__before_clear_bit();
4777 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4778 smp_mb__after_clear_bit();
4779
9f6c9258 4780 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4781 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4782}
4783
9f6c9258
DK
4784int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4785{
4786 struct net_device *dev = pci_get_drvdata(pdev);
4787 struct bnx2x *bp;
4788
4789 if (!dev) {
4790 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4791 return -ENODEV;
4792 }
4793 bp = netdev_priv(dev);
4794
4795 rtnl_lock();
4796
4797 pci_save_state(pdev);
4798
4799 if (!netif_running(dev)) {
4800 rtnl_unlock();
4801 return 0;
4802 }
4803
4804 netif_device_detach(dev);
4805
5d07d868 4806 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4807
4808 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4809
4810 rtnl_unlock();
4811
4812 return 0;
4813}
4814
4815int bnx2x_resume(struct pci_dev *pdev)
4816{
4817 struct net_device *dev = pci_get_drvdata(pdev);
4818 struct bnx2x *bp;
4819 int rc;
4820
4821 if (!dev) {
4822 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4823 return -ENODEV;
4824 }
4825 bp = netdev_priv(dev);
4826
4827 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4828 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4829 return -EAGAIN;
4830 }
4831
4832 rtnl_lock();
4833
4834 pci_restore_state(pdev);
4835
4836 if (!netif_running(dev)) {
4837 rtnl_unlock();
4838 return 0;
4839 }
4840
4841 bnx2x_set_power_state(bp, PCI_D0);
4842 netif_device_attach(dev);
4843
4844 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4845
4846 rtnl_unlock();
4847
4848 return rc;
4849}
619c5cb6 4850
619c5cb6
VZ
4851void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4852 u32 cid)
4853{
b9871bcf
AE
4854 if (!cxt) {
4855 BNX2X_ERR("bad context pointer %p\n", cxt);
4856 return;
4857 }
4858
619c5cb6
VZ
4859 /* ustorm cxt validation */
4860 cxt->ustorm_ag_context.cdu_usage =
4861 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4862 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4863 /* xcontext validation */
4864 cxt->xstorm_ag_context.cdu_reserved =
4865 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4866 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4867}
4868
1191cb83
ED
4869static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4870 u8 fw_sb_id, u8 sb_index,
4871 u8 ticks)
619c5cb6 4872{
619c5cb6
VZ
4873 u32 addr = BAR_CSTRORM_INTMEM +
4874 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4875 REG_WR8(bp, addr, ticks);
51c1a580
MS
4876 DP(NETIF_MSG_IFUP,
4877 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4878 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4879}
4880
1191cb83
ED
4881static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4882 u16 fw_sb_id, u8 sb_index,
4883 u8 disable)
619c5cb6
VZ
4884{
4885 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4886 u32 addr = BAR_CSTRORM_INTMEM +
4887 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4888 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4889 /* clear and set */
4890 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4891 flags |= enable_flag;
0c14e5ce 4892 REG_WR8(bp, addr, flags);
51c1a580
MS
4893 DP(NETIF_MSG_IFUP,
4894 "port %x fw_sb_id %d sb_index %d disable %d\n",
4895 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4896}
4897
4898void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4899 u8 sb_index, u8 disable, u16 usec)
4900{
4901 int port = BP_PORT(bp);
4902 u8 ticks = usec / BNX2X_BTR;
4903
4904 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4905
4906 disable = disable ? 1 : (usec ? 0 : 1);
4907 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4908}
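/* Usage note (illustrative numbers): BNX2X_BTR is the timer tick granularity
 * in microseconds, so e.g. usec == 48 with a 4 us granularity programs 12
 * ticks; coalescing for the index is disabled either when 'disable' is set
 * explicitly or when usec == 0.
 */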