4ad79e13 1/* bnx2x_cmn.c: QLogic Everest network driver.
9f6c9258 2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
4 * Copyright (c) 2014 QLogic Corporation
5 * All rights reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
08f6dd89 11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12 * Written by: Eliezer Tamir
13 * Based on code from Michael Chan's bnx2 driver
14 * UDP CSUM errata workaround by Arik Gendelman
15 * Slowpath and fastpath rework by Vladislav Zolotarov
16 * Statistics and Link management by Yitchak Gertner
17 *
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
9f6c9258 22#include <linux/etherdevice.h>
9bcc0893 23#include <linux/if_vlan.h>
a6b7a407 24#include <linux/interrupt.h>
9f6c9258 25#include <linux/ip.h>
c9931896 26#include <linux/crash_dump.h>
9969085e 27#include <net/tcp.h>
f2e0899f 28#include <net/ipv6.h>
7f3e01fe 29#include <net/ip6_checksum.h>
076bb0c8 30#include <net/busy_poll.h>
c0cba59e 31#include <linux/prefetch.h>
9f6c9258 32#include "bnx2x_cmn.h"
523224a3 33#include "bnx2x_init.h"
042181f5 34#include "bnx2x_sp.h"
9f6c9258 35
a8f47eb7 36static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42{
43 int i;
44
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
a8f47eb7 49 }
50}
51
52static void bnx2x_add_all_napi(struct bnx2x *bp)
53{
54 int i;
55
56 /* Add NAPI objects */
57 for_each_eth_queue(bp, i) {
58 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 bnx2x_poll, NAPI_POLL_WEIGHT);
a8f47eb7 60 }
61}
62
63static int bnx2x_calc_num_queues(struct bnx2x *bp)
64{
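 /* Use bnx2x_num_queues (module parameter) when set; otherwise fall back
 * to the kernel's default RSS queue count. The result is clamped to the
 * adapter's limits below.
 */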
7d0445d6 65 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67 /* Reduce memory usage in kdump environment by using only one queue */
c9931896 68 if (is_kdump_kernel())
69 nq = 1;
70
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 return nq;
a8f47eb7 73}
74
75/**
76 * bnx2x_move_fp - move content of the fastpath structure.
77 *
78 * @bp: driver handle
79 * @from: source FP index
80 * @to: destination FP index
81 *
82 * Makes sure the contents of the bp->fp[to].napi is kept
83 * intact. This is done by first copying the napi struct from
84 * the target to the source, and then mem copying the entire
85 * source onto the target. Update txdata pointers and related
86 * content.
87 */
88static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89{
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
34d5626a 98 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
100 /* Copy the NAPI object as it has been already initialized */
101 from_fp->napi = to_fp->napi;
102
103 /* Move bnx2x_fastpath contents */
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
105 to_fp->index = to;
65565884 106
107 /* Retain the tpa_info of the original `to' version as we don't want
108 * 2 FPs to contain the same tpa_info pointer.
109 */
110 to_fp->tpa_info = old_tpa_info;
111
112 /* move sp_objs contents as well, as their indices match fp ones */
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115 /* move fp_stats contents as well, as their indices match fp ones */
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117
118 /* Update txdata pointers in fp and move txdata content accordingly:
119 * Each fp consumes 'max_cos' txdata structures, so the index should be
120 * decremented by max_cos x delta.
121 */
122
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 (bp)->max_cos;
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 }
130
131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135}
136
137/**
138 * bnx2x_fill_fw_str - Fill buffer with FW version string.
139 *
140 * @bp: driver handle
141 * @buf: character buffer to fill with the fw name
142 * @buf_len: length of the above buffer
143 *
144 */
145void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146{
147 if (IS_PF(bp)) {
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 "bc %d.%d.%d%s%s",
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 } else {
6411280a 161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162 }
163}
164
165/**
166 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
167 *
168 * @bp: driver handle
169 * @delta: number of eth queues which were not allocated
170 */
171static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172{
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174
175 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
16a5fd92 176 * backward along the array could cause memory to be overwritten
177 */
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
182
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 }
187 }
188}
189
a8f47eb7 190int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
619c5cb6 191
192/* free skb in the packet ring at pos idx
193 * return idx of last bd freed
194 */
6383c0b3 195static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
9f6c9258 198{
6383c0b3 199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 int nbd;
95e92fd4 205 u16 split_bd_len = 0;
206
207 /* prefetch skb end pointer to speedup dev_kfree_skb() */
208 prefetch(&skb->end);
209
51c1a580 210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 211 txdata->txq_index, idx, tx_buf, skb);
9f6c9258 212
6383c0b3 213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216#ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
219 bnx2x_panic();
220 }
221#endif
222 new_cons = nbd + tx_buf->first_bd;
223
224 /* Get the next bd */
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227 /* Skip a parse bd... */
228 --nbd;
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232 /* Skip second parse bd... */
233 --nbd;
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 }
236
95e92fd4 237 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
9f6c9258 238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 --nbd;
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 }
244
245 /* unmap first bd */
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 DMA_TO_DEVICE);
249
250 /* now free frags */
251 while (nbd > 0) {
252
6383c0b3 253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 if (--nbd)
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 }
259
260 /* release skb */
261 WARN_ON(!skb);
d8290ae5 262 if (likely(skb)) {
263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len;
e1615903 265 dev_kfree_skb_any(skb);
2df1a70a 266 }
d8290ae5 267
268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL;
270
271 return new_cons;
272}
273
6383c0b3 274int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 275{
9f6c9258 276 struct netdev_queue *txq;
6383c0b3 277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 278 unsigned int pkts_compl = 0, bytes_compl = 0;
279
280#ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
282 return -1;
283#endif
284
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
288
289 while (sw_cons != hw_cons) {
290 u16 pkt_cons;
291
292 pkt_cons = TX_BD(sw_cons);
293
294 DP(NETIF_MSG_TX_DONE,
295 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 296 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 297
2df1a70a 298 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 299 &pkts_compl, &bytes_compl);
2df1a70a 300
301 sw_cons++;
302 }
303
304 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
306 txdata->tx_pkt_cons = sw_cons;
307 txdata->tx_bd_cons = bd_cons;
308
309 /* Need to make the tx_bd_cons update visible to start_xmit()
310 * before checking for netif_tx_queue_stopped(). Without the
311 * memory barrier, there is a small possibility that
312 * start_xmit() will miss it and cause the queue to be stopped
313 * forever.
314 * On the other hand we need an rmb() here to ensure the proper
315 * ordering of bit testing in the following
316 * netif_tx_queue_stopped(txq) call.
317 */
318 smp_mb();
319
9f6c9258 320 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 321 /* Taking tx_lock() is needed to prevent re-enabling the queue
 322 * while it's empty. This could have happened if rx_action() gets
323 * suspended in bnx2x_tx_int() after the condition before
324 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325 *
326 * stops the queue->sees fresh tx_bd_cons->releases the queue->
327 * sends some packets consuming the whole queue again->
328 * stops the queue
329 */
330
331 __netif_tx_lock(txq, smp_processor_id());
332
333 if ((netif_tx_queue_stopped(txq)) &&
334 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336 netif_tx_wake_queue(txq);
337
338 __netif_tx_unlock(txq);
339 }
340 return 0;
341}
342
343static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344 u16 idx)
345{
346 u16 last_max = fp->last_max_sge;
347
348 if (SUB_S16(idx, last_max) > 0)
349 fp->last_max_sge = idx;
350}
351
352static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 u16 sge_len,
354 struct eth_end_agg_rx_cqe *cqe)
355{
356 struct bnx2x *bp = fp->bp;
357 u16 last_max, last_elem, first_elem;
358 u16 delta = 0;
359 u16 i;
360
361 if (!sge_len)
362 return;
363
364 /* First mark all used pages */
365 for (i = 0; i < sge_len; i++)
619c5cb6 366 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 367 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 370 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372 /* Here we assume that the last SGE index is the biggest */
373 prefetch((void *)(fp->sge_mask));
523224a3 374 bnx2x_update_last_max_sge(fp,
621b4d66 375 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377 last_max = RX_SGE(fp->last_max_sge);
378 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381 /* If ring is not full */
382 if (last_elem + 1 != first_elem)
383 last_elem++;
384
385 /* Now update the prod */
386 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387 if (likely(fp->sge_mask[i]))
388 break;
389
390 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391 delta += BIT_VEC64_ELEM_SZ;
392 }
393
394 if (delta > 0) {
395 fp->rx_sge_prod += delta;
396 /* clear page-end entries */
397 bnx2x_clear_sge_mask_next_elems(fp);
398 }
399
400 DP(NETIF_MSG_RX_STATUS,
401 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
402 fp->last_max_sge, fp->rx_sge_prod);
403}
404
2de67439 405/* Get Toeplitz hash value in the skb using the value from the
406 * CQE (calculated by HW).
407 */
408static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb 409 const struct eth_fast_path_rx_cqe *cqe,
5495ab75 410 enum pkt_hash_types *rxhash_type)
e52fcb24 411{
2de67439 412 /* Get Toeplitz hash from CQE */
e52fcb24 413 if ((bp->dev->features & NETIF_F_RXHASH) &&
414 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415 enum eth_rss_hash_type htype;
416
417 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419 (htype == TCP_IPV6_HASH_TYPE)) ?
420 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
e52fcb24 422 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb 423 }
5495ab75 424 *rxhash_type = PKT_HASH_TYPE_NONE;
425 return 0;
426}
427
9f6c9258 428static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 429 u16 cons, u16 prod,
619c5cb6 430 struct eth_fast_path_rx_cqe *cqe)
431{
432 struct bnx2x *bp = fp->bp;
433 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 dma_addr_t mapping;
437 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 439
440 /* print error if current state != stop */
441 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
e52fcb24 444 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 445 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 446 first_buf->data + NET_SKB_PAD,
447 fp->rx_buf_size, DMA_FROM_DEVICE);
448 /*
449 * ...if it fails - move the skb from the consumer to the producer
450 * and set the current aggregation state as ERROR to drop it
451 * when TPA_STOP arrives.
452 */
453
454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455 /* Move the BD from the consumer to the producer */
e52fcb24 456 bnx2x_reuse_rx_data(fp, cons, prod);
457 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458 return;
459 }
9f6c9258 460
461 /* move empty data from pool to prod */
462 prod_rx_buf->data = first_buf->data;
619c5cb6 463 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 464 /* point prod_bd to new data */
465 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
468 /* move partial skb from cons to pool (don't unmap yet) */
469 *first_buf = *cons_rx_buf;
470
471 /* mark bin state as START */
472 tpa_info->parsing_flags =
473 le16_to_cpu(cqe->pars_flags.flags);
474 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475 tpa_info->tpa_state = BNX2X_TPA_START;
476 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477 tpa_info->placement_offset = cqe->placement_offset;
5495ab75 478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479 if (fp->mode == TPA_MODE_GRO) {
480 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 481 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482 tpa_info->gro_size = gro_size;
483 }
619c5cb6 484
485#ifdef BNX2X_STOP_ON_ERROR
486 fp->tpa_queue_used |= (1 << queue);
9f6c9258 487 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488 fp->tpa_queue_used);
489#endif
490}
491
492/* Timestamp option length allowed for TPA aggregation:
493 *
494 * nop nop kind length echo val
495 */
496#define TPA_TSTAMP_OPT_LEN 12
497/**
cbf1de72 498 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 499 *
cbf1de72 500 * @skb: packet skb
501 * @parsing_flags: parsing flags from the START CQE
502 * @len_on_bd: total length of the first packet for the
503 * aggregation.
cbf1de72 504 * @pkt_len: length of all segments
505 *
506 * Approximate value of the MSS for this aggregation calculated using
507 * the first packet of it.
2de67439 508 * Compute number of aggregated segments, and gso_type.
e4e3c02a 509 */
cbf1de72 510static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511 u16 len_on_bd, unsigned int pkt_len,
512 u16 num_of_coalesced_segs)
e4e3c02a 513{
cbf1de72 514 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 515 * other than timestamp or IPv6 extension headers.
e4e3c02a 516 */
517 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 520 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 521 hdrs_len += sizeof(struct ipv6hdr);
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 } else {
619c5cb6 524 hdrs_len += sizeof(struct iphdr);
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526 }
527
 528 /* Check if there was a TCP timestamp; if there was, it will
 529 * always be 12 bytes long: nop nop kind length echo val.
530 *
531 * Otherwise FW would close the aggregation.
532 */
533 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
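 /* Approximate the MSS from the first coalesced frame: its payload is
 * the length placed on the BD minus the estimated header length.
 */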
536 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
539 * to skb_shinfo(skb)->gso_segs
540 */
ab5777d7 541 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542}
543
544static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545 u16 index, gfp_t gfp_mask)
1191cb83 546{
547 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
4cace675 549 struct bnx2x_alloc_pool *pool = &fp->page_pool;
550 dma_addr_t mapping;
551
b9032741 552 if (!pool->page) {
4cace675 553 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
5c9ffde4 554 if (unlikely(!pool->page))
4cace675 555 return -ENOMEM;
4cace675 556
4cace675 557 pool->offset = 0;
558 }
559
560 mapping = dma_map_page(&bp->pdev->dev, pool->page,
561 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
562 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
563 BNX2X_ERR("Can't map sge\n");
564 return -ENOMEM;
565 }
566
567 sw_buf->page = pool->page;
568 sw_buf->offset = pool->offset;
569
570 dma_unmap_addr_set(sw_buf, mapping, mapping);
571
572 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
573 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
574
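 /* Advance within the pool page. Keep an extra reference while another
 * SGE-sized chunk still fits; otherwise drop the pool's pointer so the
 * next call allocates a fresh page.
 */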
4cace675 575 pool->offset += SGE_PAGE_SIZE;
576 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
577 get_page(pool->page);
578 else
579 pool->page = NULL;
580 return 0;
581}
582
9f6c9258 583static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584 struct bnx2x_agg_info *tpa_info,
585 u16 pages,
586 struct sk_buff *skb,
587 struct eth_end_agg_rx_cqe *cqe,
588 u16 cqe_idx)
589{
590 struct sw_rx_page *rx_pg, old_rx_pg;
591 u32 i, frag_len, frag_size;
592 int err, j, frag_id = 0;
619c5cb6 593 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 594 u16 full_page = 0, gro_size = 0;
9f6c9258 595
619c5cb6 596 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
597
598 if (fp->mode == TPA_MODE_GRO) {
599 gro_size = tpa_info->gro_size;
600 full_page = tpa_info->full_page;
601 }
602
603 /* This is needed in order to enable forwarding support */
604 if (frag_size)
605 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
606 le16_to_cpu(cqe->pkt_len),
607 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 608
9f6c9258 609#ifdef BNX2X_STOP_ON_ERROR
924d75ab 610 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
611 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
612 pages, cqe_idx);
619c5cb6 613 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
614 bnx2x_panic();
615 return -EINVAL;
616 }
617#endif
618
619 /* Run through the SGL and compose the fragmented skb */
620 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 621 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
622
623 /* FW gives the indices of the SGE as if the ring is an array
624 (meaning that "next" element will consume 2 indices) */
625 if (fp->mode == TPA_MODE_GRO)
626 frag_len = min_t(u32, frag_size, (u32)full_page);
627 else /* LRO */
924d75ab 628 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 629
630 rx_pg = &fp->rx_page_ring[sge_idx];
631 old_rx_pg = *rx_pg;
632
633 /* If we fail to allocate a substitute page, we simply stop
634 where we are and drop the whole packet */
996dedba 635 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 636 if (unlikely(err)) {
15192a8c 637 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
638 return err;
639 }
640
641 dma_unmap_page(&bp->pdev->dev,
642 dma_unmap_addr(&old_rx_pg, mapping),
643 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
9f6c9258 644 /* Add one frag and update the appropriate fields in the skb */
621b4d66 645 if (fp->mode == TPA_MODE_LRO)
646 skb_fill_page_desc(skb, j, old_rx_pg.page,
647 old_rx_pg.offset, frag_len);
648 else { /* GRO */
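 /* Split the SGE page into gro_size-sized frags so the stack sees
 * roughly one frag per coalesced segment.
 */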
649 int rem;
650 int offset = 0;
651 for (rem = frag_len; rem > 0; rem -= gro_size) {
652 int len = rem > gro_size ? gro_size : rem;
653 skb_fill_page_desc(skb, frag_id++,
654 old_rx_pg.page,
655 old_rx_pg.offset + offset,
656 len);
657 if (offset)
658 get_page(old_rx_pg.page);
659 offset += len;
660 }
661 }
662
663 skb->data_len += frag_len;
924d75ab 664 skb->truesize += SGE_PAGES;
665 skb->len += frag_len;
666
667 frag_size -= frag_len;
668 }
669
670 return 0;
671}
672
673static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
674{
675 if (fp->rx_frag_size)
e51423d9 676 skb_free_frag(data);
677 else
678 kfree(data);
679}
680
996dedba 681static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 682{
683 if (fp->rx_frag_size) {
684 /* GFP_KERNEL allocations are used only during initialization */
d0164adc 685 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
686 return (void *)__get_free_page(gfp_mask);
687
d46d132c 688 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 689 }
d46d132c 690
996dedba 691 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
692}
693
694#ifdef CONFIG_INET
695static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
696{
697 const struct iphdr *iph = ip_hdr(skb);
698 struct tcphdr *th;
699
700 skb_set_transport_header(skb, sizeof(struct iphdr));
701 th = tcp_hdr(skb);
702
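 /* Prime th->check with the TCP pseudo-header checksum, the state the
 * GRO completion path (tcp_gro_complete() via bnx2x_gro_csum()) expects.
 */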
703 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
704 iph->saddr, iph->daddr, 0);
705}
706
707static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
708{
709 struct ipv6hdr *iph = ipv6_hdr(skb);
710 struct tcphdr *th;
711
712 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
713 th = tcp_hdr(skb);
714
715 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
716 &iph->saddr, &iph->daddr, 0);
717}
718
719static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
720 void (*gro_func)(struct bnx2x*, struct sk_buff*))
721{
0e24c0ad 722 skb_reset_network_header(skb);
723 gro_func(bp, skb);
724 tcp_gro_complete(skb);
725}
726#endif
727
728static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
729 struct sk_buff *skb)
730{
731#ifdef CONFIG_INET
cbf1de72 732 if (skb_shinfo(skb)->gso_size) {
733 switch (be16_to_cpu(skb->protocol)) {
734 case ETH_P_IP:
2c2d06d5 735 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
736 break;
737 case ETH_P_IPV6:
2c2d06d5 738 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
739 break;
740 default:
741 netdev_WARN_ONCE(bp->dev,
742 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
743 be16_to_cpu(skb->protocol));
9969085e 744 }
745 }
746#endif
60e66fee 747 skb_record_rx_queue(skb, fp->rx_queue);
748 napi_gro_receive(&fp->napi, skb);
749}
750
751static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
752 struct bnx2x_agg_info *tpa_info,
753 u16 pages,
754 struct eth_end_agg_rx_cqe *cqe,
755 u16 cqe_idx)
9f6c9258 756{
619c5cb6 757 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 758 u8 pad = tpa_info->placement_offset;
619c5cb6 759 u16 len = tpa_info->len_on_bd;
e52fcb24 760 struct sk_buff *skb = NULL;
621b4d66 761 u8 *new_data, *data = rx_buf->data;
762 u8 old_tpa_state = tpa_info->tpa_state;
763
764 tpa_info->tpa_state = BNX2X_TPA_STOP;
765
 766 /* If there was an error during the handling of the TPA_START -
767 * drop this aggregation.
768 */
769 if (old_tpa_state == BNX2X_TPA_ERROR)
770 goto drop;
771
e52fcb24 772 /* Try to allocate the new data */
996dedba 773 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
774 /* Unmap skb in the pool anyway, as we are going to change
775 pool entry status to BNX2X_TPA_STOP even if new skb allocation
776 fails. */
777 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 778 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 779 if (likely(new_data))
d46d132c 780 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 781
e52fcb24 782 if (likely(skb)) {
9f6c9258 783#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 784 if (pad + len > fp->rx_buf_size) {
51c1a580 785 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 786 pad, len, fp->rx_buf_size);
787 bnx2x_panic();
788 return;
789 }
790#endif
791
e52fcb24 792 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 793 skb_put(skb, len);
5495ab75 794 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
795
796 skb->protocol = eth_type_trans(skb, bp->dev);
797 skb->ip_summed = CHECKSUM_UNNECESSARY;
798
799 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
800 skb, cqe, cqe_idx)) {
619c5cb6 801 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 802 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 803 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 804 } else {
805 DP(NETIF_MSG_RX_STATUS,
806 "Failed to allocate new pages - dropping packet!\n");
40955532 807 dev_kfree_skb_any(skb);
808 }
809
810 /* put new data in bin */
811 rx_buf->data = new_data;
9f6c9258 812
619c5cb6 813 return;
9f6c9258 814 }
815 if (new_data)
816 bnx2x_frag_free(fp, new_data);
817drop:
818 /* drop the packet and keep the buffer in the bin */
819 DP(NETIF_MSG_RX_STATUS,
820 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 821 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
822}
823
824static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
825 u16 index, gfp_t gfp_mask)
826{
827 u8 *data;
828 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
829 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
830 dma_addr_t mapping;
831
996dedba 832 data = bnx2x_frag_alloc(fp, gfp_mask);
833 if (unlikely(data == NULL))
834 return -ENOMEM;
835
836 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
837 fp->rx_buf_size,
838 DMA_FROM_DEVICE);
839 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 840 bnx2x_frag_free(fp, data);
841 BNX2X_ERR("Can't map rx data\n");
842 return -ENOMEM;
843 }
844
845 rx_buf->data = data;
846 dma_unmap_addr_set(rx_buf, mapping, mapping);
847
848 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
849 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
850
851 return 0;
852}
853
854static
855void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
856 struct bnx2x_fastpath *fp,
857 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 858{
859 /* Do nothing if no L4 csum validation was done.
860 * We do not check whether IP csum was validated. For IPv4 we assume
861 * that if the card got as far as validating the L4 csum, it also
862 * validated the IP csum. IPv6 has no IP csum.
863 */
d6cb3e41 864 if (cqe->fast_path_cqe.status_flags &
e488921f 865 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
866 return;
867
e488921f 868 /* If L4 validation was done, check if an error was found. */
869
870 if (cqe->fast_path_cqe.type_error_flags &
871 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
872 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 873 qstats->hw_csum_err++;
874 else
875 skb->ip_summed = CHECKSUM_UNNECESSARY;
876}
9f6c9258 877
a8f47eb7 878static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
879{
880 struct bnx2x *bp = fp->bp;
881 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 882 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 883 int rx_pkt = 0;
884 union eth_rx_cqe *cqe;
885 struct eth_fast_path_rx_cqe *cqe_fp;
886
887#ifdef BNX2X_STOP_ON_ERROR
888 if (unlikely(bp->panic))
889 return 0;
890#endif
891 if (budget <= 0)
892 return rx_pkt;
9f6c9258 893
894 bd_cons = fp->rx_bd_cons;
895 bd_prod = fp->rx_bd_prod;
896 bd_prod_fw = bd_prod;
897 sw_comp_cons = fp->rx_comp_cons;
898 sw_comp_prod = fp->rx_comp_prod;
899
900 comp_ring_cons = RCQ_BD(sw_comp_cons);
901 cqe = &fp->rx_comp_ring[comp_ring_cons];
902 cqe_fp = &cqe->fast_path_cqe;
903
904 DP(NETIF_MSG_RX_STATUS,
75b29459 905 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 906
75b29459 907 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
908 struct sw_rx_bd *rx_buf = NULL;
909 struct sk_buff *skb;
9f6c9258 910 u8 cqe_fp_flags;
619c5cb6 911 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 912 u16 len, pad, queue;
e52fcb24 913 u8 *data;
bd5cef03 914 u32 rxhash;
5495ab75 915 enum pkt_hash_types rxhash_type;
9f6c9258 916
917#ifdef BNX2X_STOP_ON_ERROR
918 if (unlikely(bp->panic))
919 return 0;
920#endif
921
922 bd_prod = RX_BD(bd_prod);
923 bd_cons = RX_BD(bd_cons);
924
9aaae044 925 /* A rmb() is required to ensure that the CQE is not read
926 * before it is written by the adapter DMA. PCI ordering
927 * rules will make sure the other fields are written before
928 * the marker at the end of struct eth_fast_path_rx_cqe
929 * but without rmb() a weakly ordered processor can process
930 * stale data. Without the barrier TPA state-machine might
931 * enter inconsistent state and kernel stack might be
932 * provided with incorrect packet description - these lead
 933 * to various kernel crashes.
934 */
935 rmb();
936
937 cqe_fp_flags = cqe_fp->type_error_flags;
938 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 939
940 DP(NETIF_MSG_RX_STATUS,
941 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
942 CQE_TYPE(cqe_fp_flags),
943 cqe_fp_flags, cqe_fp->status_flags,
944 le32_to_cpu(cqe_fp->rss_hash_result),
945 le16_to_cpu(cqe_fp->vlan_tag),
946 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
947
948 /* is this a slowpath msg? */
619c5cb6 949 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
950 bnx2x_sp_event(fp, cqe);
951 goto next_cqe;
e52fcb24 952 }
621b4d66 953
954 rx_buf = &fp->rx_buf_ring[bd_cons];
955 data = rx_buf->data;
9f6c9258 956
e52fcb24 957 if (!CQE_TYPE_FAST(cqe_fp_type)) {
958 struct bnx2x_agg_info *tpa_info;
959 u16 frag_size, pages;
619c5cb6 960#ifdef BNX2X_STOP_ON_ERROR
e52fcb24 961 /* sanity check */
7e6b4d44 962 if (fp->mode == TPA_MODE_DISABLED &&
963 (CQE_TYPE_START(cqe_fp_type) ||
964 CQE_TYPE_STOP(cqe_fp_type)))
7e6b4d44 965 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
e52fcb24 966 CQE_TYPE(cqe_fp_type));
619c5cb6 967#endif
9f6c9258 968
969 if (CQE_TYPE_START(cqe_fp_type)) {
970 u16 queue = cqe_fp->queue_index;
971 DP(NETIF_MSG_RX_STATUS,
972 "calling tpa_start on queue %d\n",
973 queue);
9f6c9258 974
975 bnx2x_tpa_start(fp, queue,
976 bd_cons, bd_prod,
977 cqe_fp);
621b4d66 978
e52fcb24 979 goto next_rx;
980 }
981 queue = cqe->end_agg_cqe.queue_index;
982 tpa_info = &fp->tpa_info[queue];
983 DP(NETIF_MSG_RX_STATUS,
984 "calling tpa_stop on queue %d\n",
985 queue);
986
987 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
988 tpa_info->len_on_bd;
989
990 if (fp->mode == TPA_MODE_GRO)
991 pages = (frag_size + tpa_info->full_page - 1) /
992 tpa_info->full_page;
993 else
994 pages = SGE_PAGE_ALIGN(frag_size) >>
995 SGE_PAGE_SHIFT;
996
997 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
998 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 999#ifdef BNX2X_STOP_ON_ERROR
1000 if (bp->panic)
1001 return 0;
1002#endif
1003
1004 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1005 goto next_cqe;
1006 }
1007 /* non TPA */
621b4d66 1008 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1009 pad = cqe_fp->placement_offset;
1010 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 1011 dma_unmap_addr(rx_buf, mapping),
1012 pad + RX_COPY_THRESH,
1013 DMA_FROM_DEVICE);
1014 pad += NET_SKB_PAD;
1015 prefetch(data + pad); /* speedup eth_type_trans() */
1016 /* is this an error packet? */
1017 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 1018 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1019 "ERROR flags %x rx packet %u\n",
1020 cqe_fp_flags, sw_comp_cons);
15192a8c 1021 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1022 goto reuse_rx;
1023 }
9f6c9258 1024
1025 /* Since we don't have a jumbo ring
1026 * copy small packets if mtu > 1500
1027 */
1028 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1029 (len <= RX_COPY_THRESH)) {
45abfb10 1030 skb = napi_alloc_skb(&fp->napi, len);
e52fcb24 1031 if (skb == NULL) {
51c1a580 1032 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 1033 "ERROR packet dropped because of alloc failure\n");
15192a8c 1034 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1035 goto reuse_rx;
1036 }
1037 memcpy(skb->data, data + pad, len);
1038 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1039 } else {
1040 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1041 GFP_ATOMIC) == 0)) {
9f6c9258 1042 dma_unmap_single(&bp->pdev->dev,
e52fcb24 1043 dma_unmap_addr(rx_buf, mapping),
a8c94b91 1044 fp->rx_buf_size,
9f6c9258 1045 DMA_FROM_DEVICE);
d46d132c 1046 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 1047 if (unlikely(!skb)) {
d46d132c 1048 bnx2x_frag_free(fp, data);
1049 bnx2x_fp_qstats(bp, fp)->
1050 rx_skb_alloc_failed++;
1051 goto next_rx;
1052 }
9f6c9258 1053 skb_reserve(skb, pad);
9f6c9258 1054 } else {
1055 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1056 "ERROR packet dropped because of alloc failure\n");
15192a8c 1057 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 1058reuse_rx:
e52fcb24 1059 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1060 goto next_rx;
1061 }
036d2df9 1062 }
9f6c9258 1063
1064 skb_put(skb, len);
1065 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 1066
036d2df9 1067 /* Set Toeplitz hash for a none-LRO skb */
1068 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1069 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 1070
036d2df9 1071 skb_checksum_none_assert(skb);
f85582f8 1072
d6cb3e41 1073 if (bp->dev->features & NETIF_F_RXCSUM)
1074 bnx2x_csum_validate(skb, cqe, fp,
1075 bnx2x_fp_qstats(bp, fp));
9f6c9258 1076
f233cafe 1077 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1078
eeed018c 1079 /* Check if this packet was timestamped */
56daf66d 1080 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1081 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1082 bnx2x_set_rx_ts(bp, skb);
1083
1084 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1085 PARSING_FLAGS_VLAN)
86a9bad3 1086 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1087 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1088
b59768c6 1089 napi_gro_receive(&fp->napi, skb);
9f6c9258 1090next_rx:
e52fcb24 1091 rx_buf->data = NULL;
1092
1093 bd_cons = NEXT_RX_IDX(bd_cons);
1094 bd_prod = NEXT_RX_IDX(bd_prod);
1095 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1096 rx_pkt++;
1097next_cqe:
1098 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1099 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1100
1101 /* mark CQE as free */
1102 BNX2X_SEED_CQE(cqe_fp);
1103
1104 if (rx_pkt == budget)
1105 break;
1106
1107 comp_ring_cons = RCQ_BD(sw_comp_cons);
1108 cqe = &fp->rx_comp_ring[comp_ring_cons];
1109 cqe_fp = &cqe->fast_path_cqe;
1110 } /* while */
1111
1112 fp->rx_bd_cons = bd_cons;
1113 fp->rx_bd_prod = bd_prod_fw;
1114 fp->rx_comp_cons = sw_comp_cons;
1115 fp->rx_comp_prod = sw_comp_prod;
1116
1117 /* Update producers */
1118 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1119 fp->rx_sge_prod);
1120
1121 return rx_pkt;
1122}
1123
1124static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1125{
1126 struct bnx2x_fastpath *fp = fp_cookie;
1127 struct bnx2x *bp = fp->bp;
6383c0b3 1128 u8 cos;
9f6c9258 1129
1130 DP(NETIF_MSG_INTR,
1131 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1132 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1133
523224a3 1134 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1135
1136#ifdef BNX2X_STOP_ON_ERROR
1137 if (unlikely(bp->panic))
1138 return IRQ_HANDLED;
1139#endif
1140
1141 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1142 for_each_cos_in_tx_queue(fp, cos)
65565884 1143 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1144
523224a3 1145 prefetch(&fp->sb_running_index[SM_RX_ID]);
f5fbf115 1146 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1147
1148 return IRQ_HANDLED;
1149}
1150
1151/* HW Lock for shared dual port PHYs */
1152void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1153{
1154 mutex_lock(&bp->port.phy_mutex);
1155
8203c4b6 1156 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1157}
1158
1159void bnx2x_release_phy_lock(struct bnx2x *bp)
1160{
8203c4b6 1161 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1162
1163 mutex_unlock(&bp->port.phy_mutex);
1164}
1165
1166/* calculates MF speed according to current linespeed and MF configuration */
1167u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1168{
1169 u16 line_speed = bp->link_vars.line_speed;
1170 if (IS_MF(bp)) {
1171 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1172 bp->mf_config[BP_VN(bp)]);
1173
1174 /* Calculate the current MAX line speed limit for the MF
1175 * devices
0793f83f 1176 */
da3cc2da 1177 if (IS_MF_PERCENT_BW(bp))
1178 line_speed = (line_speed * maxCfg) / 100;
1179 else { /* SD mode */
1180 u16 vn_max_rate = maxCfg * 100;
1181
1182 if (vn_max_rate < line_speed)
1183 line_speed = vn_max_rate;
faa6fcbb 1184 }
1185 }
1186
1187 return line_speed;
1188}
1189
1190/**
1191 * bnx2x_fill_report_data - fill link report data to report
1192 *
1193 * @bp: driver handle
1194 * @data: link state to update
1195 *
 1196 * It uses non-atomic bit operations because it is called under the mutex.
1197 */
1198static void bnx2x_fill_report_data(struct bnx2x *bp,
1199 struct bnx2x_link_report_data *data)
2ae17f66 1200{
1201 memset(data, 0, sizeof(*data));
1202
1203 if (IS_PF(bp)) {
1204 /* Fill the report data: effective line speed */
1205 data->line_speed = bnx2x_get_mf_speed(bp);
1206
1207 /* Link is down */
1208 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1209 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1210 &data->link_report_flags);
1211
1212 if (!BNX2X_NUM_ETH_QUEUES(bp))
1213 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214 &data->link_report_flags);
1215
1216 /* Full DUPLEX */
1217 if (bp->link_vars.duplex == DUPLEX_FULL)
1218 __set_bit(BNX2X_LINK_REPORT_FD,
1219 &data->link_report_flags);
1220
1221 /* Rx Flow Control is ON */
1222 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1223 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1224 &data->link_report_flags);
1225
1226 /* Tx Flow Control is ON */
1227 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1228 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1229 &data->link_report_flags);
1230 } else { /* VF */
1231 *data = bp->vf_link_vars;
1232 }
1233}
1234
1235/**
1236 * bnx2x_link_report - report link status to OS.
1237 *
1238 * @bp: driver handle
1239 *
1240 * Calls the __bnx2x_link_report() under the same locking scheme
1241 * as a link/PHY state managing code to ensure a consistent link
1242 * reporting.
1243 */
1244
1245void bnx2x_link_report(struct bnx2x *bp)
1246{
1247 bnx2x_acquire_phy_lock(bp);
1248 __bnx2x_link_report(bp);
1249 bnx2x_release_phy_lock(bp);
1250}
9f6c9258 1251
1252/**
1253 * __bnx2x_link_report - report link status to OS.
1254 *
1255 * @bp: driver handle
1256 *
16a5fd92 1257 * Non-atomic implementation.
1258 * Should be called under the phy_lock.
1259 */
1260void __bnx2x_link_report(struct bnx2x *bp)
1261{
1262 struct bnx2x_link_report_data cur_data;
9f6c9258 1263
2ae17f66 1264 /* reread mf_cfg */
ad5afc89 1265 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1266 bnx2x_read_mf_cfg(bp);
1267
1268 /* Read the current link report info */
1269 bnx2x_fill_report_data(bp, &cur_data);
1270
1271 /* Don't report link down or exactly the same link status twice */
1272 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1273 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1274 &bp->last_reported_link.link_report_flags) &&
1275 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1276 &cur_data.link_report_flags)))
1277 return;
1278
1279 bp->link_cnt++;
9f6c9258 1280
 1281 /* We are going to report new link parameters now -
1282 * remember the current data for the next time.
1283 */
1284 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1285
1286 /* propagate status to VFs */
1287 if (IS_PF(bp))
1288 bnx2x_iov_link_update(bp);
1289
1290 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1291 &cur_data.link_report_flags)) {
1292 netif_carrier_off(bp->dev);
1293 netdev_err(bp->dev, "NIC Link is Down\n");
1294 return;
1295 } else {
1296 const char *duplex;
1297 const char *flow;
1298
2ae17f66 1299 netif_carrier_on(bp->dev);
9f6c9258 1300
1301 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1302 &cur_data.link_report_flags))
94f05b0f 1303 duplex = "full";
9f6c9258 1304 else
94f05b0f 1305 duplex = "half";
9f6c9258 1306
1307 /* Handle the FC at the end so that only these flags would be
1308 * possibly set. This way we may easily check if there is no FC
1309 * enabled.
1310 */
1311 if (cur_data.link_report_flags) {
1312 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1313 &cur_data.link_report_flags)) {
1314 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1315 &cur_data.link_report_flags))
1316 flow = "ON - receive & transmit";
1317 else
1318 flow = "ON - receive";
9f6c9258 1319 } else {
94f05b0f 1320 flow = "ON - transmit";
9f6c9258 1321 }
1322 } else {
1323 flow = "none";
9f6c9258 1324 }
1325 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1326 cur_data.line_speed, duplex, flow);
1327 }
1328}
1329
1330static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1331{
1332 int i;
1333
1334 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1335 struct eth_rx_sge *sge;
1336
1337 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1338 sge->addr_hi =
1339 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1340 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1341
1342 sge->addr_lo =
1343 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1344 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1345 }
1346}
1347
1348static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1349 struct bnx2x_fastpath *fp, int last)
1350{
1351 int i;
1352
1353 for (i = 0; i < last; i++) {
1354 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1355 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1356 u8 *data = first_buf->data;
1357
1358 if (data == NULL) {
1359 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1360 continue;
1361 }
1362 if (tpa_info->tpa_state == BNX2X_TPA_START)
1363 dma_unmap_single(&bp->pdev->dev,
1364 dma_unmap_addr(first_buf, mapping),
1365 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1366 bnx2x_frag_free(fp, data);
1367 first_buf->data = NULL;
1368 }
1369}
1370
1371void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1372{
1373 int j;
1374
1375 for_each_rx_queue_cnic(bp, j) {
1376 struct bnx2x_fastpath *fp = &bp->fp[j];
1377
1378 fp->rx_bd_cons = 0;
1379
1380 /* Activate BD ring */
1381 /* Warning!
1382 * this will generate an interrupt (to the TSTORM)
1383 * must only be done after chip is initialized
1384 */
1385 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1386 fp->rx_sge_prod);
1387 }
1388}
1389
1390void bnx2x_init_rx_rings(struct bnx2x *bp)
1391{
1392 int func = BP_FUNC(bp);
523224a3 1393 u16 ring_prod;
9f6c9258 1394 int i, j;
25141580 1395
b3b83c3f 1396 /* Allocate TPA resources */
55c11941 1397 for_each_eth_queue(bp, j) {
523224a3 1398 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1399
1400 DP(NETIF_MSG_IFUP,
1401 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1402
7e6b4d44 1403 if (fp->mode != TPA_MODE_DISABLED) {
16a5fd92 1404 /* Fill the per-aggregation pool */
dfacf138 1405 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1406 struct bnx2x_agg_info *tpa_info =
1407 &fp->tpa_info[i];
1408 struct sw_rx_bd *first_buf =
1409 &tpa_info->first_buf;
1410
996dedba
MS
1411 first_buf->data =
1412 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1413 if (!first_buf->data) {
1414 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1415 j);
9f6c9258 1416 bnx2x_free_tpa_pool(bp, fp, i);
7e6b4d44 1417 fp->mode = TPA_MODE_DISABLED;
1418 break;
1419 }
1420 dma_unmap_addr_set(first_buf, mapping, 0);
1421 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1422 }
1423
1424 /* "next page" elements initialization */
1425 bnx2x_set_next_page_sgl(fp);
1426
1427 /* set SGEs bit mask */
1428 bnx2x_init_sge_ring_bit_mask(fp);
1429
1430 /* Allocate SGEs and initialize the ring elements */
1431 for (i = 0, ring_prod = 0;
1432 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1433
1434 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1435 GFP_KERNEL) < 0) {
1436 BNX2X_ERR("was only able to allocate %d rx sges\n",
1437 i);
1438 BNX2X_ERR("disabling TPA for queue[%d]\n",
1439 j);
523224a3 1440 /* Cleanup already allocated elements */
1441 bnx2x_free_rx_sge_range(bp, fp,
1442 ring_prod);
1443 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1444 MAX_AGG_QS(bp));
7e6b4d44 1445 fp->mode = TPA_MODE_DISABLED;
1446 ring_prod = 0;
1447 break;
1448 }
1449 ring_prod = NEXT_SGE_IDX(ring_prod);
1450 }
1451
1452 fp->rx_sge_prod = ring_prod;
1453 }
1454 }
1455
55c11941 1456 for_each_eth_queue(bp, j) {
1457 struct bnx2x_fastpath *fp = &bp->fp[j];
1458
1459 fp->rx_bd_cons = 0;
9f6c9258 1460
1461 /* Activate BD ring */
1462 /* Warning!
1463 * this will generate an interrupt (to the TSTORM)
1464 * must only be done after chip is initialized
1465 */
1466 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1467 fp->rx_sge_prod);
9f6c9258 1468
1469 if (j != 0)
1470 continue;
1471
619c5cb6 1472 if (CHIP_IS_E1(bp)) {
1473 REG_WR(bp, BAR_USTRORM_INTMEM +
1474 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1475 U64_LO(fp->rx_comp_mapping));
1476 REG_WR(bp, BAR_USTRORM_INTMEM +
1477 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1478 U64_HI(fp->rx_comp_mapping));
1479 }
1480 }
1481}
f85582f8 1482
55c11941 1483static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1484{
6383c0b3 1485 u8 cos;
55c11941 1486 struct bnx2x *bp = fp->bp;
9f6c9258 1487
1488 for_each_cos_in_tx_queue(fp, cos) {
1489 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1490 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1491
1492 u16 sw_prod = txdata->tx_pkt_prod;
1493 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1494
1495 while (sw_cons != sw_prod) {
1496 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1497 &pkts_compl, &bytes_compl);
1498 sw_cons++;
9f6c9258 1499 }
1500
1501 netdev_tx_reset_queue(
1502 netdev_get_tx_queue(bp->dev,
1503 txdata->txq_index));
1504 }
1505}
1506
1507static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1508{
1509 int i;
1510
1511 for_each_tx_queue_cnic(bp, i) {
1512 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1513 }
1514}
1515
1516static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1517{
1518 int i;
1519
1520 for_each_eth_queue(bp, i) {
1521 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1522 }
1523}
1524
1525static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1526{
1527 struct bnx2x *bp = fp->bp;
1528 int i;
1529
1530 /* ring wasn't allocated */
1531 if (fp->rx_buf_ring == NULL)
1532 return;
1533
1534 for (i = 0; i < NUM_RX_BD; i++) {
1535 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1536 u8 *data = rx_buf->data;
b3b83c3f 1537
e52fcb24 1538 if (data == NULL)
b3b83c3f 1539 continue;
1540 dma_unmap_single(&bp->pdev->dev,
1541 dma_unmap_addr(rx_buf, mapping),
1542 fp->rx_buf_size, DMA_FROM_DEVICE);
1543
e52fcb24 1544 rx_buf->data = NULL;
d46d132c 1545 bnx2x_frag_free(fp, data);
1546 }
1547}
1548
1549static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1550{
1551 int j;
1552
1553 for_each_rx_queue_cnic(bp, j) {
1554 bnx2x_free_rx_bds(&bp->fp[j]);
1555 }
1556}
1557
1558static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1559{
b3b83c3f 1560 int j;
9f6c9258 1561
55c11941 1562 for_each_eth_queue(bp, j) {
1563 struct bnx2x_fastpath *fp = &bp->fp[j];
1564
b3b83c3f 1565 bnx2x_free_rx_bds(fp);
9f6c9258 1566
7e6b4d44 1567 if (fp->mode != TPA_MODE_DISABLED)
dfacf138 1568 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1569 }
1570}
1571
a8f47eb7 1572static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1573{
1574 bnx2x_free_tx_skbs_cnic(bp);
1575 bnx2x_free_rx_skbs_cnic(bp);
1576}
1577
1578void bnx2x_free_skbs(struct bnx2x *bp)
1579{
1580 bnx2x_free_tx_skbs(bp);
1581 bnx2x_free_rx_skbs(bp);
1582}
1583
1584void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1585{
1586 /* load old values */
1587 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1588
1589 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1590 /* leave all but MAX value */
1591 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1592
1593 /* set new MAX value */
1594 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1595 & FUNC_MF_CFG_MAX_BW_MASK;
1596
1597 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1598 }
1599}
1600
1601/**
1602 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1603 *
1604 * @bp: driver handle
1605 * @nvecs: number of vectors to be released
1606 */
1607static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1608{
ca92429f 1609 int i, offset = 0;
9f6c9258 1610
1611 if (nvecs == offset)
1612 return;
1613
1614 /* VFs don't have a default SB */
1615 if (IS_PF(bp)) {
1616 free_irq(bp->msix_table[offset].vector, bp->dev);
1617 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1618 bp->msix_table[offset].vector);
1619 offset++;
1620 }
1621
1622 if (CNIC_SUPPORT(bp)) {
1623 if (nvecs == offset)
1624 return;
1625 offset++;
1626 }
ca92429f 1627
ec6ba945 1628 for_each_eth_queue(bp, i) {
1629 if (nvecs == offset)
1630 return;
1631 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1632 i, bp->msix_table[offset].vector);
9f6c9258 1633
ca92429f 1634 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1635 }
1636}
1637
d6214d7a 1638void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1639{
30a5de77 1640 if (bp->flags & USING_MSIX_FLAG &&
1641 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1642 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1643
1644 /* vfs don't have a default status block */
1645 if (IS_PF(bp))
1646 nvecs++;
1647
1648 bnx2x_free_msix_irqs(bp, nvecs);
1649 } else {
30a5de77 1650 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1651 }
1652}
1653
0e8d2ec5 1654int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1655{
1ab4434c 1656 int msix_vec = 0, i, rc;
9f6c9258 1657
1658 /* VFs don't have a default status block */
1659 if (IS_PF(bp)) {
1660 bp->msix_table[msix_vec].entry = msix_vec;
1661 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1662 bp->msix_table[0].entry);
1663 msix_vec++;
1664 }
9f6c9258 1665
1666 /* Cnic requires an msix vector for itself */
1667 if (CNIC_SUPPORT(bp)) {
1668 bp->msix_table[msix_vec].entry = msix_vec;
1669 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1670 msix_vec, bp->msix_table[msix_vec].entry);
1671 msix_vec++;
1672 }
1673
6383c0b3 1674 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1675 for_each_eth_queue(bp, i) {
d6214d7a 1676 bp->msix_table[msix_vec].entry = msix_vec;
1677 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1678 msix_vec, msix_vec, i);
d6214d7a 1679 msix_vec++;
1680 }
1681
1682 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1683 msix_vec);
d6214d7a 1684
1685 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1686 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1687 /*
1688 * reconfigure number of tx/rx queues according to available
1689 * MSI-X vectors
1690 */
a5444b17 1691 if (rc == -ENOSPC) {
30a5de77 1692 /* Get by with single vector */
1693 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1694 if (rc < 0) {
1695 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1696 rc);
1697 goto no_msix;
1698 }
1699
1700 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1701 bp->flags |= USING_SINGLE_MSIX_FLAG;
1702
1703 BNX2X_DEV_INFO("set number of queues to 1\n");
1704 bp->num_ethernet_queues = 1;
1705 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1706 } else if (rc < 0) {
a5444b17 1707 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1708 goto no_msix;
1709 } else if (rc < msix_vec) {
1710 /* how less vectors we will have? */
1711 int diff = msix_vec - rc;
1712
1713 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1714
1715 /*
1716 * decrease number of queues by number of unallocated entries
1717 */
1718 bp->num_ethernet_queues -= diff;
1719 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1720
1721 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1722 bp->num_queues);
9f6c9258
DK
1723 }
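	/* Editor's illustrative note (not from the original sources): on a PF
	 * with CNIC support and, say, 8 ETH queues, msix_vec above is
	 * 1 (slowpath) + 1 (CNIC) + 8 = 10.  If the PCI core grants only 6
	 * vectors, diff = 4 and num_ethernet_queues drops from 8 to 4, while
	 * the slowpath and CNIC entries keep their vectors.
	 */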
1724
1725 bp->flags |= USING_MSIX_FLAG;
1726
1727 return 0;
30a5de77
DK
1728
1729no_msix:
 1730 /* fall back to INTx if not enough memory */
1731 if (rc == -ENOMEM)
1732 bp->flags |= DISABLE_MSI_FLAG;
1733
1734 return rc;
9f6c9258
DK
1735}
1736
1737static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1738{
ca92429f 1739 int i, rc, offset = 0;
9f6c9258 1740
ad5afc89
AE
1741 /* no default status block for vf */
1742 if (IS_PF(bp)) {
1743 rc = request_irq(bp->msix_table[offset++].vector,
1744 bnx2x_msix_sp_int, 0,
1745 bp->dev->name, bp->dev);
1746 if (rc) {
1747 BNX2X_ERR("request sp irq failed\n");
1748 return -EBUSY;
1749 }
9f6c9258
DK
1750 }
1751
55c11941
MS
1752 if (CNIC_SUPPORT(bp))
1753 offset++;
1754
ec6ba945 1755 for_each_eth_queue(bp, i) {
9f6c9258
DK
1756 struct bnx2x_fastpath *fp = &bp->fp[i];
1757 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1758 bp->dev->name, i);
1759
d6214d7a 1760 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1761 bnx2x_msix_fp_int, 0, fp->name, fp);
1762 if (rc) {
ca92429f
DK
1763 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1764 bp->msix_table[offset].vector, rc);
1765 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1766 return -EBUSY;
1767 }
1768
d6214d7a 1769 offset++;
9f6c9258
DK
1770 }
1771
ec6ba945 1772 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1773 if (IS_PF(bp)) {
1774 offset = 1 + CNIC_SUPPORT(bp);
1775 netdev_info(bp->dev,
1776 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1777 bp->msix_table[0].vector,
1778 0, bp->msix_table[offset].vector,
1779 i - 1, bp->msix_table[offset + i - 1].vector);
1780 } else {
1781 offset = CNIC_SUPPORT(bp);
1782 netdev_info(bp->dev,
1783 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1784 0, bp->msix_table[offset].vector,
1785 i - 1, bp->msix_table[offset + i - 1].vector);
1786 }
9f6c9258
DK
1787 return 0;
1788}
1789
d6214d7a 1790int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1791{
1792 int rc;
1793
1794 rc = pci_enable_msi(bp->pdev);
1795 if (rc) {
51c1a580 1796 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1797 return -1;
1798 }
1799 bp->flags |= USING_MSI_FLAG;
1800
1801 return 0;
1802}
1803
1804static int bnx2x_req_irq(struct bnx2x *bp)
1805{
1806 unsigned long flags;
30a5de77 1807 unsigned int irq;
9f6c9258 1808
30a5de77 1809 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1810 flags = 0;
1811 else
1812 flags = IRQF_SHARED;
1813
30a5de77
DK
1814 if (bp->flags & USING_MSIX_FLAG)
1815 irq = bp->msix_table[0].vector;
1816 else
1817 irq = bp->pdev->irq;
1818
1819 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1820}
1821
c957d09f 1822static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1823{
1824 int rc = 0;
30a5de77
DK
1825 if (bp->flags & USING_MSIX_FLAG &&
1826 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1827 rc = bnx2x_req_msix_irqs(bp);
1828 if (rc)
1829 return rc;
1830 } else {
619c5cb6
VZ
1831 rc = bnx2x_req_irq(bp);
1832 if (rc) {
1833 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1834 return rc;
1835 }
1836 if (bp->flags & USING_MSI_FLAG) {
1837 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1838 netdev_info(bp->dev, "using MSI IRQ %d\n",
1839 bp->dev->irq);
1840 }
1841 if (bp->flags & USING_MSIX_FLAG) {
1842 bp->dev->irq = bp->msix_table[0].vector;
1843 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1844 bp->dev->irq);
619c5cb6
VZ
1845 }
1846 }
1847
1848 return 0;
1849}
1850
55c11941
MS
1851static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1852{
1853 int i;
1854
8f20aa57 1855 for_each_rx_queue_cnic(bp, i) {
55c11941 1856 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1857 }
55c11941
MS
1858}
1859
1191cb83 1860static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1861{
1862 int i;
1863
8f20aa57 1864 for_each_eth_queue(bp, i) {
9f6c9258 1865 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1866 }
9f6c9258
DK
1867}
1868
55c11941
MS
1869static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1870{
1871 int i;
1872
8f20aa57 1873 for_each_rx_queue_cnic(bp, i) {
55c11941 1874 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57 1875 }
55c11941
MS
1876}
1877
1191cb83 1878static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1879{
1880 int i;
1881
8f20aa57 1882 for_each_eth_queue(bp, i) {
9f6c9258 1883 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57 1884 }
9f6c9258
DK
1885}
1886
1887void bnx2x_netif_start(struct bnx2x *bp)
1888{
4b7ed897
DK
1889 if (netif_running(bp->dev)) {
1890 bnx2x_napi_enable(bp);
55c11941
MS
1891 if (CNIC_LOADED(bp))
1892 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1893 bnx2x_int_enable(bp);
1894 if (bp->state == BNX2X_STATE_OPEN)
1895 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1896 }
1897}
1898
1899void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1900{
1901 bnx2x_int_disable_sync(bp, disable_hw);
1902 bnx2x_napi_disable(bp);
55c11941
MS
1903 if (CNIC_LOADED(bp))
1904 bnx2x_napi_disable_cnic(bp);
9f6c9258 1905}
9f6c9258 1906
f663dd9a 1907u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1908 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1909{
8307fa3e 1910 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1911
55c11941 1912 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1913 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1914 u16 ether_type = ntohs(hdr->h_proto);
1915
1916 /* Skip VLAN tag if present */
1917 if (ether_type == ETH_P_8021Q) {
1918 struct vlan_ethhdr *vhdr =
1919 (struct vlan_ethhdr *)skb->data;
1920
1921 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1922 }
1923
1924 /* If ethertype is FCoE or FIP - use FCoE ring */
1925 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1926 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1927 }
55c11941 1928
cdb9d6ae 1929 /* select a non-FCoE queue */
3968d389 1930 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
8307fa3e
VZ
1931}
1932
d6214d7a
DK
1933void bnx2x_set_num_queues(struct bnx2x *bp)
1934{
96305234 1935 /* RSS queues */
55c11941 1936 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1937
a3348722 1938 /* override in STORAGE SD modes */
2e98ffc2 1939 if (IS_MF_STORAGE_ONLY(bp))
55c11941
MS
1940 bp->num_ethernet_queues = 1;
1941
ec6ba945 1942 /* Add special queues */
55c11941
MS
1943 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1944 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1945
1946 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1947}
1948
cdb9d6ae
VZ
1949/**
1950 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1951 *
1952 * @bp: Driver handle
1953 *
 1954 * We currently support at most 16 Tx queues for each CoS, thus we will
1955 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1956 * bp->max_cos.
1957 *
1958 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1959 * index after all ETH L2 indices.
1960 *
1961 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1962 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1963 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1964 *
1965 * The proper configuration of skb->queue_mapping is handled by
1966 * bnx2x_select_queue() and __skb_tx_hash().
1967 *
1968 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1969 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1970 */
55c11941 1971static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1972{
6383c0b3 1973 int rc, tx, rx;
ec6ba945 1974
65565884 1975 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1976 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1977
6383c0b3 1978/* account for fcoe queue */
55c11941
MS
1979 if (include_cnic && !NO_FCOE(bp)) {
1980 rx++;
1981 tx++;
6383c0b3 1982 }
6383c0b3
AE
1983
1984 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1985 if (rc) {
1986 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1987 return rc;
1988 }
1989 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1990 if (rc) {
1991 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1992 return rc;
1993 }
1994
51c1a580 1995 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1996 tx, rx);
1997
ec6ba945
VZ
1998 return rc;
1999}
2000
1191cb83 2001static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
2002{
2003 int i;
2004
2005 for_each_queue(bp, i) {
2006 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 2007 u32 mtu;
a8c94b91
VZ
2008
2009 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2010 if (IS_FCOE_IDX(i))
2011 /*
 2012 * Although no IP frames are expected to arrive on
 2013 * this ring, we still want to add an
2014 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2015 * overrun attack.
2016 */
e52fcb24 2017 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 2018 else
e52fcb24
ED
2019 mtu = bp->dev->mtu;
2020 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2021 IP_HEADER_ALIGNMENT_PADDING +
e1c6dcca 2022 ETH_OVERHEAD +
e52fcb24
ED
2023 mtu +
2024 BNX2X_FW_RX_ALIGN_END;
9b70de6d 2025 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
16a5fd92 2026 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2027 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2028 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2029 else
2030 fp->rx_frag_size = 0;
a8c94b91
VZ
2031 }
2032}
2033
60cad4e6 2034static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2035{
2036 int i;
619c5cb6
VZ
2037 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2038
16a5fd92 2039 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2040 * enabled
2041 */
5d317c6a
MS
2042 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2043 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2044 bp->fp->cl_id +
2045 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2046
2047 /*
2048 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2049 * per-port, so if explicit configuration is needed, do it only
2050 * for a PMF.
2051 *
2052 * For 57712 and newer on the other hand it's a per-function
2053 * configuration.
2054 */
5d317c6a 2055 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2056}
2057
60cad4e6
AE
2058int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2059 bool config_hash, bool enable)
619c5cb6 2060{
3b603066 2061 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2062
 2063 /* Although RSS is meaningless when there is a single HW queue, we
2064 * still need it enabled in order to have HW Rx hash generated.
2065 *
2066 * if (!is_eth_multi(bp))
2067 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2068 */
2069
96305234 2070 params.rss_obj = rss_obj;
619c5cb6
VZ
2071
2072 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2073
60cad4e6
AE
2074 if (enable) {
2075 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2076
2077 /* RSS configuration */
2078 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2079 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2080 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2081 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2082 if (rss_obj->udp_rss_v4)
2083 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2084 if (rss_obj->udp_rss_v6)
2085 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
e42780b6 2086
28311f8e
YM
2087 if (!CHIP_IS_E1x(bp)) {
2088 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2089 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2090 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2091
e42780b6 2092 /* valid only for TUNN_MODE_GRE tunnel mode */
28311f8e
YM
2093 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2094 }
60cad4e6
AE
2095 } else {
2096 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2097 }
619c5cb6 2098
96305234
DK
2099 /* Hash bits */
2100 params.rss_result_mask = MULTI_MASK;
619c5cb6 2101
5d317c6a 2102 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2103
96305234
DK
2104 if (config_hash) {
2105 /* RSS keys */
e3ec69ca 2106 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2107 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2108 }
2109
60cad4e6
AE
2110 if (IS_PF(bp))
2111 return bnx2x_config_rss(bp, &params);
2112 else
2113 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2114}
2115
1191cb83 2116static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2117{
3b603066 2118 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2119
2120 /* Prepare parameters for function state transitions */
2121 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2122
2123 func_params.f_obj = &bp->func_obj;
2124 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2125
2126 func_params.params.hw_init.load_phase = load_code;
2127
2128 return bnx2x_func_state_change(bp, &func_params);
2129}
2130
2131/*
 2132 * Cleans the objects that have internal lists without sending
16a5fd92 2133 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2134 */
7fa6f340 2135void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2136{
2137 int rc;
2138 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2139 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2140 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2141
2142 /***************** Cleanup MACs' object first *************************/
2143
 2144 /* Wait for completion of the requested commands */
2145 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2146 /* Perform a dry cleanup */
2147 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2148
2149 /* Clean ETH primary MAC */
2150 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2151 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2152 &ramrod_flags);
2153 if (rc != 0)
2154 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2155
2156 /* Cleanup UC list */
2157 vlan_mac_flags = 0;
2158 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2159 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2160 &ramrod_flags);
2161 if (rc != 0)
2162 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2163
2164 /***************** Now clean mcast object *****************************/
2165 rparam.mcast_obj = &bp->mcast_obj;
2166 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2167
8b09be5f
YM
2168 /* Add a DEL command... - Since we're doing a driver cleanup only,
2169 * we take a lock surrounding both the initial send and the CONTs,
2170 * as we don't want a true completion to disrupt us in the middle.
2171 */
2172 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2173 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2174 if (rc < 0)
51c1a580
MS
2175 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2176 rc);
619c5cb6
VZ
2177
2178 /* ...and wait until all pending commands are cleared */
2179 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2180 while (rc != 0) {
2181 if (rc < 0) {
2182 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2183 rc);
8b09be5f 2184 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2185 return;
2186 }
2187
2188 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2189 }
8b09be5f 2190 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2191}
2192
2193#ifndef BNX2X_STOP_ON_ERROR
2194#define LOAD_ERROR_EXIT(bp, label) \
2195 do { \
2196 (bp)->state = BNX2X_STATE_ERROR; \
2197 goto label; \
2198 } while (0)
55c11941
MS
2199
2200#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2201 do { \
2202 bp->cnic_loaded = false; \
2203 goto label; \
2204 } while (0)
2205#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2206#define LOAD_ERROR_EXIT(bp, label) \
2207 do { \
2208 (bp)->state = BNX2X_STATE_ERROR; \
2209 (bp)->panic = 1; \
2210 return -EBUSY; \
2211 } while (0)
55c11941
MS
2212#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2213 do { \
2214 bp->cnic_loaded = false; \
2215 (bp)->panic = 1; \
2216 return -EBUSY; \
2217 } while (0)
2218#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2219
ad5afc89
AE
2220static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2221{
2222 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2223 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2224 return;
2225}
2226
2227static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2228{
8db573ba 2229 int num_groups, vf_headroom = 0;
ad5afc89 2230 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2231
ad5afc89
AE
2232 /* number of queues for statistics is number of eth queues + FCoE */
2233 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2234
ad5afc89
AE
2235 /* Total number of FW statistics requests =
2236 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2237 * and fcoe l2 queue) stats + num of queues (which includes another 1
2238 * for fcoe l2 queue if applicable)
2239 */
2240 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2241
8db573ba
AE
2242 /* vf stats appear in the request list, but their data is allocated by
2243 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2244 * it is used to determine where to place the vf stats queries in the
2245 * request struct
2246 */
2247 if (IS_SRIOV(bp))
6411280a 2248 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2249
ad5afc89
AE
2250 /* Request is built from stats_query_header and an array of
2251 * stats_query_cmd_group each of which contains
 2252 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2253 * configured in the stats_query_header.
2254 */
2255 num_groups =
8db573ba
AE
2256 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2257 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2258 1 : 0));
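	/* Editor's note (not from the original sources): the expression above
	 * is a ceiling division, i.e.
	 * DIV_ROUND_UP(fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT);
	 * e.g. 10 requests with a hypothetical group size of 8 need 2 groups.
	 */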
2259
8db573ba
AE
2260 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2261 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2262 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2263 num_groups * sizeof(struct stats_query_cmd_group);
2264
2265 /* Data for statistics requests + stats_counter
2266 * stats_counter holds per-STORM counters that are incremented
2267 * when STORM has finished with the current request.
 2268 * memory for FCoE offloaded statistics is counted anyway,
2269 * even if they will not be sent.
2270 * VF stats are not accounted for here as the data of VF stats is stored
2271 * in memory allocated by the VF, not here.
2272 */
2273 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2274 sizeof(struct per_pf_stats) +
2275 sizeof(struct fcoe_statistics_params) +
2276 sizeof(struct per_queue_stats) * num_queue_stats +
2277 sizeof(struct stats_counter);
2278
cd2b0389
JP
2279 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2280 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2281 if (!bp->fw_stats)
2282 goto alloc_mem_err;
ad5afc89
AE
2283
2284 /* Set shortcuts */
2285 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2286 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2287 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2288 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2289 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2290 bp->fw_stats_req_sz;
2291
6bf07b8e 2292 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2293 U64_HI(bp->fw_stats_req_mapping),
2294 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2295 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2296 U64_HI(bp->fw_stats_data_mapping),
2297 U64_LO(bp->fw_stats_data_mapping));
2298 return 0;
2299
2300alloc_mem_err:
2301 bnx2x_free_fw_stats_mem(bp);
2302 BNX2X_ERR("Can't allocate FW stats memory\n");
2303 return -ENOMEM;
2304}
2305
2306/* send load request to mcp and analyze response */
2307static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2308{
178135c1
DK
2309 u32 param;
2310
ad5afc89
AE
2311 /* init fw_seq */
2312 bp->fw_seq =
2313 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2314 DRV_MSG_SEQ_NUMBER_MASK);
2315 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2316
2317 /* Get current FW pulse sequence */
2318 bp->fw_drv_pulse_wr_seq =
2319 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2320 DRV_PULSE_SEQ_MASK);
2321 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2322
178135c1
DK
2323 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2324
2325 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2326 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2327
ad5afc89 2328 /* load request */
178135c1 2329 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2330
2331 /* if mcp fails to respond we must abort */
2332 if (!(*load_code)) {
2333 BNX2X_ERR("MCP response failure, aborting\n");
2334 return -EBUSY;
2335 }
2336
2337 /* If mcp refused (e.g. other port is in diagnostic mode) we
2338 * must abort
2339 */
2340 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2341 BNX2X_ERR("MCP refused load request, aborting\n");
2342 return -EBUSY;
2343 }
2344 return 0;
2345}
2346
 2347 /* check whether another PF has already loaded FW to the chip. In
 2348 * virtualized environments a PF from another VM may have already
 2349 * initialized the device, including loading FW
2350 */
91ebb929 2351int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2352{
2353 /* is another pf loaded on this engine? */
2354 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2355 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2356 /* build my FW version dword */
2357 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2358 (BCM_5710_FW_MINOR_VERSION << 8) +
2359 (BCM_5710_FW_REVISION_VERSION << 16) +
2360 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2361
2362 /* read loaded FW from chip */
2363 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2364
2365 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2366 loaded_fw, my_fw);
2367
2368 /* abort nic load if version mismatch */
2369 if (my_fw != loaded_fw) {
91ebb929
YM
2370 if (print_err)
2371 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2372 loaded_fw, my_fw);
2373 else
2374 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2375 loaded_fw, my_fw);
ad5afc89
AE
2376 return -EBUSY;
2377 }
2378 }
2379 return 0;
2380}
2381
2382/* returns the "mcp load_code" according to global load_count array */
2383static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2384{
2385 int path = BP_PATH(bp);
2386
2387 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2388 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2389 bnx2x_load_count[path][2]);
2390 bnx2x_load_count[path][0]++;
2391 bnx2x_load_count[path][1 + port]++;
ad5afc89 2392 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2393 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2394 bnx2x_load_count[path][2]);
2395 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2396 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2397 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2398 return FW_MSG_CODE_DRV_LOAD_PORT;
2399 else
2400 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2401}
2402
2403/* mark PMF if applicable */
2404static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2405{
2406 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2407 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2408 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2409 bp->port.pmf = 1;
2410 /* We need the barrier to ensure the ordering between the
2411 * writing to bp->port.pmf here and reading it from the
2412 * bnx2x_periodic_task().
2413 */
2414 smp_mb();
2415 } else {
2416 bp->port.pmf = 0;
452427b0
YM
2417 }
2418
ad5afc89
AE
2419 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2420}
2421
2422static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2423{
2424 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2425 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2426 (bp->common.shmem2_base)) {
2427 if (SHMEM2_HAS(bp, dcc_support))
2428 SHMEM2_WR(bp, dcc_support,
2429 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2430 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2431 if (SHMEM2_HAS(bp, afex_driver_support))
2432 SHMEM2_WR(bp, afex_driver_support,
2433 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2434 }
2435
2436 /* Set AFEX default VLAN tag to an invalid value */
2437 bp->afex_def_vlan_tag = -1;
452427b0
YM
2438}
2439
1191cb83
ED
2440/**
2441 * bnx2x_bz_fp - zero content of the fastpath structure.
2442 *
2443 * @bp: driver handle
2444 * @index: fastpath index to be zeroed
2445 *
2446 * Makes sure the contents of the bp->fp[index].napi is kept
2447 * intact.
2448 */
2449static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2450{
2451 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2452 int cos;
1191cb83 2453 struct napi_struct orig_napi = fp->napi;
15192a8c 2454 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2455
1191cb83 2456 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2457 if (fp->tpa_info)
2458 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2459 sizeof(struct bnx2x_agg_info));
2460 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2461
2462 /* Restore the NAPI object as it has been already initialized */
2463 fp->napi = orig_napi;
15192a8c 2464 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2465 fp->bp = bp;
2466 fp->index = index;
2467 if (IS_ETH_FP(fp))
2468 fp->max_cos = bp->max_cos;
2469 else
2470 /* Special queues support only one CoS */
2471 fp->max_cos = 1;
2472
65565884 2473 /* Init txdata pointers */
65565884
MS
2474 if (IS_FCOE_FP(fp))
2475 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2476 if (IS_ETH_FP(fp))
2477 for_each_cos_in_tx_queue(fp, cos)
2478 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2479 BNX2X_NUM_ETH_QUEUES(bp) + index];
2480
16a5fd92 2481 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2482 * minimal size so it must be set prior to queue memory allocation
2483 */
f8dcb5e3 2484 if (bp->dev->features & NETIF_F_LRO)
1191cb83 2485 fp->mode = TPA_MODE_LRO;
3c3def5f 2486 else if (bp->dev->features & NETIF_F_GRO_HW)
1191cb83 2487 fp->mode = TPA_MODE_GRO;
7e6b4d44
MS
2488 else
2489 fp->mode = TPA_MODE_DISABLED;
1191cb83 2490
22a8f237
MS
2491 /* We don't want TPA if it's disabled in bp
2492 * or if this is an FCoE L2 ring.
2493 */
2494 if (bp->disable_tpa || IS_FCOE_FP(fp))
7e6b4d44 2495 fp->mode = TPA_MODE_DISABLED;
55c11941
MS
2496}
2497
230d00eb
YM
2498void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2499{
2500 u32 cur;
2501
2502 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2503 return;
2504
2505 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2506 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2507 cur, state);
2508
2509 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2510}
2511
55c11941
MS
2512int bnx2x_load_cnic(struct bnx2x *bp)
2513{
2514 int i, rc, port = BP_PORT(bp);
2515
2516 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2517
2518 mutex_init(&bp->cnic_mutex);
2519
ad5afc89
AE
2520 if (IS_PF(bp)) {
2521 rc = bnx2x_alloc_mem_cnic(bp);
2522 if (rc) {
2523 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2524 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2525 }
55c11941
MS
2526 }
2527
2528 rc = bnx2x_alloc_fp_mem_cnic(bp);
2529 if (rc) {
2530 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532 }
2533
2534 /* Update the number of queues with the cnic queues */
2535 rc = bnx2x_set_real_num_queues(bp, 1);
2536 if (rc) {
2537 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2538 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539 }
2540
2541 /* Add all CNIC NAPI objects */
2542 bnx2x_add_all_napi_cnic(bp);
2543 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2544 bnx2x_napi_enable_cnic(bp);
2545
2546 rc = bnx2x_init_hw_func_cnic(bp);
2547 if (rc)
2548 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2549
2550 bnx2x_nic_init_cnic(bp);
2551
ad5afc89
AE
2552 if (IS_PF(bp)) {
2553 /* Enable Timer scan */
2554 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2555
2556 /* setup cnic queues */
2557 for_each_cnic_queue(bp, i) {
2558 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2559 if (rc) {
2560 BNX2X_ERR("Queue setup failed\n");
2561 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2562 }
55c11941
MS
2563 }
2564 }
2565
2566 /* Initialize Rx filter. */
8b09be5f 2567 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2568
2569 /* re-read iscsi info */
2570 bnx2x_get_iscsi_info(bp);
2571 bnx2x_setup_cnic_irq_info(bp);
2572 bnx2x_setup_cnic_info(bp);
2573 bp->cnic_loaded = true;
2574 if (bp->state == BNX2X_STATE_OPEN)
2575 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2576
55c11941
MS
2577 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2578
2579 return 0;
2580
2581#ifndef BNX2X_STOP_ON_ERROR
2582load_error_cnic2:
2583 /* Disable Timer scan */
2584 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2585
2586load_error_cnic1:
2587 bnx2x_napi_disable_cnic(bp);
2588 /* Update the number of queues without the cnic queues */
d9d81862 2589 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2590 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2591load_error_cnic0:
2592 BNX2X_ERR("CNIC-related load failed\n");
2593 bnx2x_free_fp_mem_cnic(bp);
2594 bnx2x_free_mem_cnic(bp);
2595 return rc;
2596#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2597}
2598
9f6c9258
DK
2599/* must be called with rtnl_lock */
2600int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2601{
619c5cb6 2602 int port = BP_PORT(bp);
ad5afc89 2603 int i, rc = 0, load_code = 0;
9f6c9258 2604
55c11941
MS
2605 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2606 DP(NETIF_MSG_IFUP,
2607 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2608
9f6c9258 2609#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2610 if (unlikely(bp->panic)) {
2611 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2612 return -EPERM;
51c1a580 2613 }
9f6c9258
DK
2614#endif
2615
2616 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2617
16a5fd92 2618 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2619 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2620 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2621 &bp->last_reported_link.link_report_flags);
2ae17f66 2622
ad5afc89
AE
2623 if (IS_PF(bp))
2624 /* must be called before memory allocation and HW init */
2625 bnx2x_ilt_set_info(bp);
523224a3 2626
6383c0b3
AE
2627 /*
 2628 * Zero fastpath structures while preserving invariants that are set up
 2629 * only once: the napi struct, fp index, max_cos and the bp pointer.
7e6b4d44 2630 * Also set fp->mode and txdata_ptr.
b3b83c3f 2631 */
51c1a580 2632 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2633 for_each_queue(bp, i)
2634 bnx2x_bz_fp(bp, i);
55c11941
MS
2635 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2636 bp->num_cnic_queues) *
2637 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2638
55c11941 2639 bp->fcoe_init = false;
6383c0b3 2640
a8c94b91
VZ
2641 /* Set the receive queues buffer size */
2642 bnx2x_set_rx_buf_size(bp);
2643
ad5afc89
AE
2644 if (IS_PF(bp)) {
2645 rc = bnx2x_alloc_mem(bp);
2646 if (rc) {
2647 BNX2X_ERR("Unable to allocate bp memory\n");
2648 return rc;
2649 }
2650 }
2651
ad5afc89
AE
2652 /* need to be done after alloc mem, since it's self adjusting to amount
2653 * of memory available for RSS queues
2654 */
2655 rc = bnx2x_alloc_fp_mem(bp);
2656 if (rc) {
2657 BNX2X_ERR("Unable to allocate memory for fps\n");
2658 LOAD_ERROR_EXIT(bp, load_error0);
2659 }
d6214d7a 2660
e3ed4eae
DK
 2661 /* Allocate memory for FW statistics */
2662 if (bnx2x_alloc_fw_stats_mem(bp))
2663 LOAD_ERROR_EXIT(bp, load_error0);
2664
8d9ac297
AE
2665 /* request pf to initialize status blocks */
2666 if (IS_VF(bp)) {
2667 rc = bnx2x_vfpf_init(bp);
2668 if (rc)
2669 LOAD_ERROR_EXIT(bp, load_error0);
2670 }
2671
b3b83c3f
DK
 2672 /* Since bnx2x_alloc_mem() may possibly update
2673 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2674 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2675 */
55c11941 2676 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2677 if (rc) {
ec6ba945 2678 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2679 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2680 }
2681
6383c0b3 2682 /* Configure multi-CoS mappings in the kernel.
16a5fd92
YM
 2683 * This configuration may be overridden by a multi-class queue
 2684 * discipline or by a DCBX negotiation result.
6383c0b3
AE
2685 */
2686 bnx2x_setup_tc(bp->dev, bp->max_cos);
2687
26614ba5
MS
2688 /* Add all NAPI objects */
2689 bnx2x_add_all_napi(bp);
55c11941 2690 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2691 bnx2x_napi_enable(bp);
2692
ad5afc89
AE
2693 if (IS_PF(bp)) {
2694 /* set pf load just before approaching the MCP */
2695 bnx2x_set_pf_load(bp);
2696
2697 /* if mcp exists send load request and analyze response */
2698 if (!BP_NOMCP(bp)) {
2699 /* attempt to load pf */
2700 rc = bnx2x_nic_load_request(bp, &load_code);
2701 if (rc)
2702 LOAD_ERROR_EXIT(bp, load_error1);
2703
2704 /* what did mcp say? */
91ebb929 2705 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2706 if (rc) {
2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2708 LOAD_ERROR_EXIT(bp, load_error2);
2709 }
ad5afc89
AE
2710 } else {
2711 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2712 }
9f6c9258 2713
ad5afc89
AE
2714 /* mark pmf if applicable */
2715 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2716
ad5afc89
AE
2717 /* Init Function state controlling object */
2718 bnx2x__init_func_obj(bp);
6383c0b3 2719
ad5afc89
AE
2720 /* Initialize HW */
2721 rc = bnx2x_init_hw(bp, load_code);
2722 if (rc) {
2723 BNX2X_ERR("HW init failed, aborting\n");
2724 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2725 LOAD_ERROR_EXIT(bp, load_error2);
2726 }
9f6c9258
DK
2727 }
2728
ecf01c22
YM
2729 bnx2x_pre_irq_nic_init(bp);
2730
d6214d7a
DK
2731 /* Connect to IRQs */
2732 rc = bnx2x_setup_irqs(bp);
523224a3 2733 if (rc) {
ad5afc89
AE
2734 BNX2X_ERR("setup irqs failed\n");
2735 if (IS_PF(bp))
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2737 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2738 }
2739
619c5cb6 2740 /* Init per-function objects */
ad5afc89 2741 if (IS_PF(bp)) {
ecf01c22
YM
2742 /* Setup NIC internals and enable interrupts */
2743 bnx2x_post_irq_nic_init(bp, load_code);
2744
ad5afc89 2745 bnx2x_init_bp_objs(bp);
b56e9670 2746 bnx2x_iov_nic_init(bp);
a3348722 2747
ad5afc89
AE
2748 /* Set AFEX default VLAN tag to an invalid value */
2749 bp->afex_def_vlan_tag = -1;
2750 bnx2x_nic_load_afex_dcc(bp, load_code);
2751 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2752 rc = bnx2x_func_start(bp);
2753 if (rc) {
2754 BNX2X_ERR("Function start failed!\n");
2755 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2756
619c5cb6 2757 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2758 }
9f6c9258 2759
ad5afc89
AE
2760 /* Send LOAD_DONE command to MCP */
2761 if (!BP_NOMCP(bp)) {
2762 load_code = bnx2x_fw_command(bp,
2763 DRV_MSG_CODE_LOAD_DONE, 0);
2764 if (!load_code) {
2765 BNX2X_ERR("MCP response failure, aborting\n");
2766 rc = -EBUSY;
2767 LOAD_ERROR_EXIT(bp, load_error3);
2768 }
2769 }
9f6c9258 2770
0c14e5ce
AE
2771 /* initialize FW coalescing state machines in RAM */
2772 bnx2x_update_coalesce(bp);
60cad4e6 2773 }
0c14e5ce 2774
60cad4e6
AE
2775 /* setup the leading queue */
2776 rc = bnx2x_setup_leading(bp);
2777 if (rc) {
2778 BNX2X_ERR("Setup leading failed!\n");
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
ad5afc89 2781
60cad4e6
AE
2782 /* set up the rest of the queues */
2783 for_each_nondefault_eth_queue(bp, i) {
2784 if (IS_PF(bp))
2785 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2786 else /* VF */
2787 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2788 if (rc) {
60cad4e6 2789 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2790 LOAD_ERROR_EXIT(bp, load_error3);
2791 }
60cad4e6 2792 }
8d9ac297 2793
60cad4e6
AE
2794 /* setup rss */
2795 rc = bnx2x_init_rss(bp);
2796 if (rc) {
2797 BNX2X_ERR("PF RSS init failed\n");
2798 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2799 }
619c5cb6 2800
523224a3
DK
2801 /* Now when Clients are configured we are ready to work */
2802 bp->state = BNX2X_STATE_OPEN;
2803
619c5cb6 2804 /* Configure a ucast MAC */
ad5afc89
AE
2805 if (IS_PF(bp))
2806 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2807 else /* vf */
f8f4f61a
DK
2808 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2809 true);
51c1a580
MS
2810 if (rc) {
2811 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2812 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2813 }
6e30dd4e 2814
ad5afc89 2815 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2816 bnx2x_update_max_mf_config(bp, bp->pending_max);
2817 bp->pending_max = 0;
2818 }
2819
ad5afc89
AE
2820 if (bp->port.pmf) {
2821 rc = bnx2x_initial_phy_init(bp, load_mode);
2822 if (rc)
2823 LOAD_ERROR_EXIT(bp, load_error3);
2824 }
c63da990 2825 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2826
619c5cb6
VZ
2827 /* Start fast path */
2828
05cc5a39
YM
2829 /* Re-configure vlan filters */
2830 rc = bnx2x_vlan_reconfigure_vid(bp);
2831 if (rc)
2832 LOAD_ERROR_EXIT(bp, load_error3);
2833
619c5cb6 2834 /* Initialize Rx filter. */
8b09be5f 2835 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2836
eeed018c
MK
2837 if (bp->flags & PTP_SUPPORTED) {
2838 bnx2x_init_ptp(bp);
2839 bnx2x_configure_ptp_filters(bp);
2840 }
2841 /* Start Tx */
9f6c9258
DK
2842 switch (load_mode) {
2843 case LOAD_NORMAL:
16a5fd92 2844 /* Tx queue should be only re-enabled */
523224a3 2845 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2846 break;
2847
2848 case LOAD_OPEN:
2849 netif_tx_start_all_queues(bp->dev);
4e857c58 2850 smp_mb__after_atomic();
9f6c9258
DK
2851 break;
2852
2853 case LOAD_DIAG:
8970b2e4 2854 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2855 bp->state = BNX2X_STATE_DIAG;
2856 break;
2857
2858 default:
2859 break;
2860 }
2861
00253a8c 2862 if (bp->port.pmf)
4c704899 2863 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2864 else
9f6c9258
DK
2865 bnx2x__link_status_update(bp);
2866
2867 /* start the timer */
2868 mod_timer(&bp->timer, jiffies + bp->current_interval);
2869
55c11941
MS
2870 if (CNIC_ENABLED(bp))
2871 bnx2x_load_cnic(bp);
9f6c9258 2872
42f8277f
YM
2873 if (IS_PF(bp))
2874 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2875
ad5afc89
AE
2876 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2877 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2878 u32 val;
2879 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
230d00eb
YM
2880 val &= ~DRV_FLAGS_MTU_MASK;
2881 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
9ce392d4
YM
2882 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2883 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2884 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2885 }
2886
619c5cb6 2887 /* Wait for all pending SP commands to complete */
ad5afc89 2888 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2889 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2890 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2891 return -EBUSY;
2892 }
6891dd25 2893
c48f350f
YM
2894 /* Update driver data for On-Chip MFW dump. */
2895 if (IS_PF(bp))
2896 bnx2x_update_mfw_dump(bp);
2897
9876879f
BW
2898 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2899 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2900 bnx2x_dcbx_init(bp, false);
2901
230d00eb
YM
2902 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2903 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2904
55c11941
MS
2905 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2906
9f6c9258
DK
2907 return 0;
2908
619c5cb6 2909#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2910load_error3:
ad5afc89
AE
2911 if (IS_PF(bp)) {
2912 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2913
ad5afc89
AE
2914 /* Clean queueable objects */
2915 bnx2x_squeeze_objects(bp);
2916 }
619c5cb6 2917
9f6c9258
DK
2918 /* Free SKBs, SGEs, TPA pool and driver internals */
2919 bnx2x_free_skbs(bp);
ec6ba945 2920 for_each_rx_queue(bp, i)
9f6c9258 2921 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2922
9f6c9258 2923 /* Release IRQs */
d6214d7a
DK
2924 bnx2x_free_irq(bp);
2925load_error2:
ad5afc89 2926 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2927 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2928 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2929 }
2930
2931 bp->port.pmf = 0;
9f6c9258
DK
2932load_error1:
2933 bnx2x_napi_disable(bp);
722c6f58 2934 bnx2x_del_all_napi(bp);
ad5afc89 2935
889b9af3 2936 /* clear pf_load status, as it was already set */
ad5afc89
AE
2937 if (IS_PF(bp))
2938 bnx2x_clear_pf_load(bp);
d6214d7a 2939load_error0:
ad5afc89 2940 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2941 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2942 bnx2x_free_mem(bp);
2943
2944 return rc;
619c5cb6 2945#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2946}
2947
7fa6f340 2948int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2949{
2950 u8 rc = 0, cos, i;
2951
2952 /* Wait until tx fastpath tasks complete */
2953 for_each_tx_queue(bp, i) {
2954 struct bnx2x_fastpath *fp = &bp->fp[i];
2955
2956 for_each_cos_in_tx_queue(fp, cos)
2957 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2958 if (rc)
2959 return rc;
2960 }
2961 return 0;
2962}
2963
9f6c9258 2964/* must be called with rtnl_lock */
5d07d868 2965int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2966{
2967 int i;
c9ee9206
VZ
2968 bool global = false;
2969
55c11941
MS
2970 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2971
230d00eb
YM
2972 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2973 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2974
9ce392d4 2975 /* mark driver is unloaded in shmem2 */
ad5afc89 2976 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2977 u32 val;
2978 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2979 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2980 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2981 }
2982
80bfe5cc 2983 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2984 (bp->state == BNX2X_STATE_CLOSED ||
2985 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2986 /* We can get here if the driver has been unloaded
2987 * during parity error recovery and is either waiting for a
2988 * leader to complete or for other functions to unload and
2989 * then ifdown has been issued. In this case we want to
 2991 * unload and let other functions complete a recovery
2991 * process.
2992 */
9f6c9258
DK
2993 bp->recovery_state = BNX2X_RECOVERY_DONE;
2994 bp->is_leader = 0;
c9ee9206
VZ
2995 bnx2x_release_leader_lock(bp);
2996 smp_mb();
2997
51c1a580
MS
2998 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2999 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
3000 return -EINVAL;
3001 }
3002
80bfe5cc 3003 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 3004 * have not completed successfully - all resources are released.
80bfe5cc
YM
3005 *
 3006 * We can get here only after an unsuccessful ndo_* callback, during which
3007 * dev->IFF_UP flag is still on.
3008 */
3009 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3010 return 0;
3011
3012 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
3013 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3014 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3015 */
3016 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3017 smp_mb();
3018
78c3bcc5
AE
3019 /* indicate to VFs that the PF is going down */
3020 bnx2x_iov_channel_down(bp);
3021
55c11941
MS
3022 if (CNIC_LOADED(bp))
3023 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3024
9505ee37
VZ
3025 /* Stop Tx */
3026 bnx2x_tx_disable(bp);
65565884 3027 netdev_reset_tc(bp->dev);
9505ee37 3028
9f6c9258 3029 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 3030
9f6c9258 3031 del_timer_sync(&bp->timer);
f85582f8 3032
f7084059 3033 if (IS_PF(bp) && !BP_NOMCP(bp)) {
ad5afc89
AE
3034 /* Set ALWAYS_ALIVE bit in shmem */
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp);
3037 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3038 bnx2x_save_statistics(bp);
3039 }
9f6c9258 3040
d78a1f08
YM
3041 /* wait till consumers catch up with producers in all queues.
 3042 * If we're recovering, FW can't write to the host, so there is no reason
3043 * to wait for the queues to complete all Tx.
3044 */
3045 if (unload_mode != UNLOAD_RECOVERY)
3046 bnx2x_drain_tx_queues(bp);
9f6c9258 3047
9b176b6b
AE
 3048 /* if VF, indicate to the PF that this function is going down (the PF
 3049 * will delete sp elements and clear initializations)
3050 */
3051 if (IS_VF(bp))
3052 bnx2x_vfpf_close_vf(bp);
3053 else if (unload_mode != UNLOAD_RECOVERY)
 3054 /* if this is a normal/close unload, we need to clean up the chip */
5d07d868 3055 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 3056 else {
c9ee9206
VZ
3057 /* Send the UNLOAD_REQUEST to the MCP */
3058 bnx2x_send_unload_req(bp, unload_mode);
3059
16a5fd92 3060 /* Prevent transactions to host from the functions on the
c9ee9206 3061 * engine that doesn't reset global blocks in case of global
16a5fd92 3062 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 3063 * (the engine whose leader will perform the recovery
3064 * last).
3065 */
3066 if (!CHIP_IS_E1x(bp))
3067 bnx2x_pf_disable(bp);
3068
3069 /* Disable HW interrupts, NAPI */
523224a3 3070 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3071 /* Delete all NAPI objects */
3072 bnx2x_del_all_napi(bp);
55c11941
MS
3073 if (CNIC_LOADED(bp))
3074 bnx2x_del_all_napi_cnic(bp);
523224a3 3075 /* Release IRQs */
d6214d7a 3076 bnx2x_free_irq(bp);
c9ee9206
VZ
3077
3078 /* Report UNLOAD_DONE to MCP */
5d07d868 3079 bnx2x_send_unload_done(bp, false);
523224a3 3080 }
9f6c9258 3081
619c5cb6 3082 /*
16a5fd92 3083 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3084 * the queueable objects here in case they failed to get cleaned so far.
3085 */
ad5afc89
AE
3086 if (IS_PF(bp))
3087 bnx2x_squeeze_objects(bp);
619c5cb6 3088
79616895
VZ
3089 /* There should be no more pending SP commands at this stage */
3090 bp->sp_state = 0;
3091
9f6c9258
DK
3092 bp->port.pmf = 0;
3093
a0d307b2
DK
3094 /* clear pending work in rtnl task */
3095 bp->sp_rtnl_state = 0;
3096 smp_mb();
3097
9f6c9258
DK
3098 /* Free SKBs, SGEs, TPA pool and driver internals */
3099 bnx2x_free_skbs(bp);
55c11941
MS
3100 if (CNIC_LOADED(bp))
3101 bnx2x_free_skbs_cnic(bp);
ec6ba945 3102 for_each_rx_queue(bp, i)
9f6c9258 3103 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3104
ad5afc89
AE
3105 bnx2x_free_fp_mem(bp);
3106 if (CNIC_LOADED(bp))
55c11941 3107 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3108
ad5afc89 3109 if (IS_PF(bp)) {
ad5afc89
AE
3110 if (CNIC_LOADED(bp))
3111 bnx2x_free_mem_cnic(bp);
3112 }
b4cddbd6
AE
3113 bnx2x_free_mem(bp);
3114
9f6c9258 3115 bp->state = BNX2X_STATE_CLOSED;
55c11941 3116 bp->cnic_loaded = false;
9f6c9258 3117
42f8277f 3118 /* Clear driver version indication in shmem */
f7084059 3119 if (IS_PF(bp) && !BP_NOMCP(bp))
42f8277f
YM
3120 bnx2x_update_mng_version(bp);
3121
c9ee9206
VZ
3122 /* Check if there are pending parity attentions. If there are - set
3123 * RECOVERY_IN_PROGRESS.
3124 */
ad5afc89 3125 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3126 bnx2x_set_reset_in_progress(bp);
3127
3128 /* Set RESET_IS_GLOBAL if needed */
3129 if (global)
3130 bnx2x_set_reset_global(bp);
3131 }
3132
9f6c9258
DK
3133 /* The last driver must disable a "close the gate" if there is no
3134 * parity attention or "process kill" pending.
3135 */
ad5afc89
AE
3136 if (IS_PF(bp) &&
3137 !bnx2x_clear_pf_load(bp) &&
3138 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3139 bnx2x_disable_close_the_gate(bp);
3140
55c11941
MS
3141 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3142
9f6c9258
DK
3143 return 0;
3144}
f85582f8 3145
9f6c9258
DK
3146int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3147{
3148 u16 pmcsr;
3149
adf5f6a1 3150 /* If there is no power capability, silently succeed */
29ed74c3 3151 if (!bp->pdev->pm_cap) {
51c1a580 3152 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3153 return 0;
3154 }
3155
29ed74c3 3156 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3157
3158 switch (state) {
3159 case PCI_D0:
29ed74c3 3160 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3161 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3162 PCI_PM_CTRL_PME_STATUS));
3163
3164 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3165 /* delay required during transition out of D3hot */
3166 msleep(20);
3167 break;
3168
3169 case PCI_D3hot:
 3170 /* If there are other clients above, don't
3171 shut down the power */
3172 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3173 return 0;
3174 /* Don't shut down the power for emulation and FPGA */
3175 if (CHIP_REV_IS_SLOW(bp))
3176 return 0;
3177
3178 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3179 pmcsr |= 3;
3180
3181 if (bp->wol)
3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3183
29ed74c3 3184 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3185 pmcsr);
3186
3187 /* No more memory access after this point until
3188 * device is brought back to D0.
3189 */
3190 break;
3191
3192 default:
51c1a580 3193 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3194 return -EINVAL;
3195 }
3196 return 0;
3197}
3198
9f6c9258
DK
3199/*
3200 * net_device service functions
3201 */
a8f47eb7 3202static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258 3203{
9f6c9258
DK
3204 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3205 napi);
3206 struct bnx2x *bp = fp->bp;
4d6acb62
ED
3207 int rx_work_done;
3208 u8 cos;
9f6c9258 3209
9f6c9258 3210#ifdef BNX2X_STOP_ON_ERROR
4d6acb62
ED
3211 if (unlikely(bp->panic)) {
3212 napi_complete(napi);
3213 return 0;
3214 }
9f6c9258 3215#endif
4d6acb62
ED
3216 for_each_cos_in_tx_queue(fp, cos)
3217 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3218 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
9f6c9258 3219
4d6acb62 3220 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
55c11941 3221
4d6acb62
ED
3222 if (rx_work_done < budget) {
3223 /* No need to update SB for FCoE L2 ring as long as
3224 * it's connected to the default SB and the SB
3225 * has been updated when NAPI was scheduled.
3226 */
3227 if (IS_FCOE_FP(fp)) {
6ad20165 3228 napi_complete_done(napi, rx_work_done);
4d6acb62 3229 } else {
9f6c9258 3230 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3231 /* bnx2x_has_rx_work() reads the status block,
3232 * thus we need to ensure that status block indices
3233 * have been actually read (bnx2x_update_fpsb_idx)
3234 * prior to this check (bnx2x_has_rx_work) so that
3235 * we won't write the "newer" value of the status block
3236 * to IGU (if there was a DMA right after
3237 * bnx2x_has_rx_work and if there is no rmb, the memory
3238 * reading (bnx2x_update_fpsb_idx) may be postponed
3239 * to right before bnx2x_ack_sb). In this case there
3240 * will never be another interrupt until there is
3241 * another update of the status block, while there
3242 * is still unhandled work.
3243 */
9f6c9258
DK
3244 rmb();
3245
3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
80f1c21c
ED
3247 if (napi_complete_done(napi, rx_work_done)) {
3248 /* Re-enable interrupts */
3249 DP(NETIF_MSG_RX_STATUS,
3250 "Update index to %d\n", fp->fp_hc_idx);
3251 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3252 le16_to_cpu(fp->fp_hc_idx),
3253 IGU_INT_ENABLE, 1);
3254 }
4d6acb62
ED
3255 } else {
3256 rx_work_done = budget;
9f6c9258
DK
3257 }
3258 }
3259 }
3260
4d6acb62 3261 return rx_work_done;
9f6c9258
DK
3262}
3263
9f6c9258
DK
3264/* we split the first BD into headers and data BDs
 3265 * to ease the pain of our fellow microcode engineers;
3266 * we use one mapping for both BDs
9f6c9258 3267 */
91226790
DK
3268static u16 bnx2x_tx_split(struct bnx2x *bp,
3269 struct bnx2x_fp_txdata *txdata,
3270 struct sw_tx_bd *tx_buf,
3271 struct eth_tx_start_bd **tx_bd, u16 hlen,
3272 u16 bd_prod)
9f6c9258
DK
3273{
3274 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3275 struct eth_tx_bd *d_tx_bd;
3276 dma_addr_t mapping;
3277 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3278
3279 /* first fix first BD */
9f6c9258
DK
3280 h_tx_bd->nbytes = cpu_to_le16(hlen);
3281
91226790
DK
3282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3283 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3284
3285 /* now get a new data BD
3286 * (after the pbd) and fill it */
3287 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3288 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3289
3290 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3291 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3292
3293 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3296
3297 /* this marks the BD as one that has no individual mapping */
3298 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3299
3300 DP(NETIF_MSG_TX_QUEUED,
3301 "TSO split data size is %d (%x:%x)\n",
3302 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3303
3304 /* update tx_bd */
3305 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3306
3307 return bd_prod;
3308}
3309
86564c3f
YM
3310#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3311#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3312static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3313{
86564c3f
YM
3314 __sum16 tsum = (__force __sum16) csum;
3315
9f6c9258 3316 if (fix > 0)
86564c3f
YM
3317 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3318 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3319
3320 else if (fix < 0)
86564c3f
YM
3321 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3322 csum_partial(t_header, -fix, 0)));
9f6c9258 3323
e2593fcd 3324 return bswab16(tsum);
9f6c9258
DK
3325}
3326
91226790 3327static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3328{
3329 u32 rc;
a848ade4
DK
3330 __u8 prot = 0;
3331 __be16 protocol;
9f6c9258
DK
3332
3333 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3334 return XMIT_PLAIN;
9f6c9258 3335
a848ade4
DK
3336 protocol = vlan_get_protocol(skb);
3337 if (protocol == htons(ETH_P_IPV6)) {
3338 rc = XMIT_CSUM_V6;
3339 prot = ipv6_hdr(skb)->nexthdr;
3340 } else {
3341 rc = XMIT_CSUM_V4;
3342 prot = ip_hdr(skb)->protocol;
3343 }
9f6c9258 3344
a848ade4
DK
3345 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3346 if (inner_ip_hdr(skb)->version == 6) {
3347 rc |= XMIT_CSUM_ENC_V6;
3348 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3349 rc |= XMIT_CSUM_TCP;
9f6c9258 3350 } else {
a848ade4
DK
3351 rc |= XMIT_CSUM_ENC_V4;
3352 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3353 rc |= XMIT_CSUM_TCP;
3354 }
3355 }
a848ade4
DK
3356 if (prot == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
9f6c9258 3358
36a8f39e
ED
3359 if (skb_is_gso(skb)) {
3360 if (skb_is_gso_v6(skb)) {
3361 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3362 if (rc & XMIT_CSUM_ENC)
3363 rc |= XMIT_GSO_ENC_V6;
3364 } else {
3365 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3366 if (rc & XMIT_CSUM_ENC)
3367 rc |= XMIT_GSO_ENC_V4;
3368 }
a848ade4 3369 }
9f6c9258
DK
3370
3371 return rc;
3372}
3373
ea2465af
YM
3374/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3375#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3376
3377/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3378#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3379
3380#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3381/* check if packet requires linearization (packet is too fragmented)
3382 no need to check fragmentation if page size > 8K (there will be no
 3383 violation of FW restrictions) */
3384static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3385 u32 xmit_type)
3386{
ea2465af
YM
3387 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3388 int to_copy = 0, hlen = 0;
9f6c9258 3389
ea2465af
YM
3390 if (xmit_type & XMIT_GSO_ENC)
3391 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
9f6c9258 3392
ea2465af 3393 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
9f6c9258
DK
3394 if (xmit_type & XMIT_GSO) {
3395 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
ea2465af 3396 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
9f6c9258
DK
3397 /* Number of windows to check */
3398 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3399 int wnd_idx = 0;
3400 int frag_idx = 0;
3401 u32 wnd_sum = 0;
3402
3403 /* Headers length */
592b9b8d
YM
3404 if (xmit_type & XMIT_GSO_ENC)
3405 hlen = (int)(skb_inner_transport_header(skb) -
3406 skb->data) +
3407 inner_tcp_hdrlen(skb);
3408 else
3409 hlen = (int)(skb_transport_header(skb) -
3410 skb->data) + tcp_hdrlen(skb);
9f6c9258
DK
3411
 3412 /* Amount of data (w/o headers) on linear part of SKB */
3413 first_bd_sz = skb_headlen(skb) - hlen;
3414
3415 wnd_sum = first_bd_sz;
3416
3417 /* Calculate the first sum - it's special */
3418 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3419 wnd_sum +=
9e903e08 3420 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3421
3422 /* If there was data on linear skb data - check it */
3423 if (first_bd_sz > 0) {
3424 if (unlikely(wnd_sum < lso_mss)) {
3425 to_copy = 1;
3426 goto exit_lbl;
3427 }
3428
3429 wnd_sum -= first_bd_sz;
3430 }
3431
3432 /* Others are easier: run through the frag list and
3433 check all windows */
3434 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3435 wnd_sum +=
9e903e08 3436 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3437
3438 if (unlikely(wnd_sum < lso_mss)) {
3439 to_copy = 1;
3440 break;
3441 }
3442 wnd_sum -=
9e903e08 3443 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3444 }
3445 } else {
 3446 /* in the non-LSO case, a packet this fragmented must always
 3447 be linearized */
3448 to_copy = 1;
3449 }
3450 }
3451
3452exit_lbl:
3453 if (unlikely(to_copy))
3454 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3455 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3456 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3457 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3458
3459 return to_copy;
3460}
3461#endif
3462
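/* Illustrative sketch (not driver code): bnx2x_pkt_req_lin() above enforces
 * the FW rule that every window of wnd_size consecutive BDs must carry at
 * least one MSS worth of payload. The #if 0 example replays that sliding
 * window over a plain array of fragment sizes; the numbers are made up.
 */
#if 0
#include <stdio.h>

/* Return 1 if some window of 'wnd_size' consecutive fragments (optionally
 * preceded by the linear payload 'first_bd_sz') sums to less than 'lso_mss',
 * i.e. the packet would have to be linearized.
 */
static int needs_linearization(const int *frag, int nfrags, int wnd_size,
			       int lso_mss, int first_bd_sz)
{
	int wnd_sum = first_bd_sz, i;

	for (i = 0; i < wnd_size - 1 && i < nfrags; i++)
		wnd_sum += frag[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= first_bd_sz;
	}

	for (i = 0; i + wnd_size - 1 < nfrags; i++) {
		wnd_sum += frag[i + wnd_size - 1];
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= frag[i];
	}
	return 0;
}

int main(void)
{
	int frags[] = { 400, 300, 200, 100, 4096, 4096 };

	/* With an MSS of 1400 and a 3-BD window, 400+300+200 already falls
	 * short, so this packet would be linearized before transmission.
	 */
	printf("linearize: %d\n",
	       needs_linearization(frags, 6, 3, 1400, 0));
	return 0;
}
#endif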
f2e0899f 3463/**
e8920674 3464 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3465 *
e8920674
DK
3466 * @skb: packet skb
3467 * @pbd: parse BD
3468 * @xmit_type: xmit flags
f2e0899f 3469 */
91226790
DK
3470static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3471 struct eth_tx_parse_bd_e1x *pbd,
3472 u32 xmit_type)
f2e0899f
DK
3473{
3474 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3475 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3476 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3477
3478 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3479 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3480 pbd->tcp_pseudo_csum =
86564c3f
YM
3481 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3482 ip_hdr(skb)->daddr,
3483 0, IPPROTO_TCP, 0));
057cf65e 3484 } else {
f2e0899f 3485 pbd->tcp_pseudo_csum =
86564c3f
YM
3486 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3487 &ipv6_hdr(skb)->daddr,
3488 0, IPPROTO_TCP, 0));
057cf65e 3489 }
f2e0899f 3490
86564c3f
YM
3491 pbd->global_data |=
3492 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3493}
f85582f8 3494
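/* Illustrative sketch (not driver code): the LSO seed written above is
 * ~csum_tcpudp_magic(saddr, daddr, 0, proto, 0), i.e. the folded (but not
 * inverted) one's-complement sum of the pseudo-header with a zero length,
 * since the FW patches the per-segment length itself. The #if 0 example
 * computes that seed by hand for made-up addresses.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1, example value */
	uint32_t daddr = 0xc0a80002;	/* 192.168.0.2, example value */
	uint32_t sum;

	/* Pseudo-header words: source, destination, protocol, zero length. */
	sum = (saddr >> 16) + (saddr & 0xffff) +
	      (daddr >> 16) + (daddr & 0xffff) +
	      6 /* IPPROTO_TCP */ + 0 /* length left to the FW */;

	printf("pbd seed (host order, before bswab16) = 0x%04x\n", fold16(sum));
	return 0;
}
#endif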
a848ade4
DK
3495/**
3496 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3497 *
3498 * @bp: driver handle
3499 * @skb: packet skb
3500 * @parsing_data: data to be updated
3501 * @xmit_type: xmit flags
3502 *
3503 * 57712/578xx related, when skb has encapsulation
3504 */
3505static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3506 u32 *parsing_data, u32 xmit_type)
3507{
3508 *parsing_data |=
3509 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3511 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3512
3513 if (xmit_type & XMIT_CSUM_TCP) {
3514 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3516 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3517
3518 return skb_inner_transport_header(skb) +
3519 inner_tcp_hdrlen(skb) - skb->data;
3520 }
3521
3522 /* We support checksum offload for TCP and UDP only.
3523 * No need to pass the UDP header length - it's a constant.
3524 */
3525 return skb_inner_transport_header(skb) +
3526 sizeof(struct udphdr) - skb->data;
3527}
3528
f2e0899f 3529/**
e8920674 3530 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3531 *
e8920674
DK
3532 * @bp: driver handle
3533 * @skb: packet skb
3534 * @parsing_data: data to be updated
3535 * @xmit_type: xmit flags
f2e0899f 3536 *
91226790 3537 * 57712/578xx related
f2e0899f 3538 */
91226790
DK
3539static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3540 u32 *parsing_data, u32 xmit_type)
f2e0899f 3541{
e39aece7 3542 *parsing_data |=
2de67439 3543 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3545 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3546
e39aece7
VZ
3547 if (xmit_type & XMIT_CSUM_TCP) {
3548 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3550 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3551
e39aece7 3552 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3553 }
3554 /* We support checksum offload for TCP and UDP only.
3555 * No need to pass the UDP header length - it's a constant.
3556 */
3557 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3558}
3559
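/* Illustrative sketch (not driver code): the E2 parse BD wants the L4 start
 * offset in 16-bit words and the TCP header length in 32-bit dwords, which
 * is what the ">> 1" and "/ 4" above produce before the field shifts are
 * applied. The #if 0 example shows the conversion for assumed header sizes.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int l4_start = 14 + 20;	/* Ethernet + IPv4 w/o options, no VLAN (assumed) */
	int tcp_hlen = 32;	/* TCP header with options (assumed)              */

	printf("L4 start: %d bytes = %d words, TCP header: %d bytes = %d dwords\n",
	       l4_start, l4_start >> 1, tcp_hlen, tcp_hlen / 4);
	return 0;
}
#endif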
a848ade4 3560/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3561static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3562 struct eth_tx_start_bd *tx_start_bd,
3563 u32 xmit_type)
93ef5c02 3564{
93ef5c02
DK
3565 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3566
a848ade4 3567 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3568 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3569
3570 if (!(xmit_type & XMIT_CSUM_TCP))
3571 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3572}
3573
f2e0899f 3574/**
e8920674 3575 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3576 *
e8920674
DK
3577 * @bp: driver handle
3578 * @skb: packet skb
3579 * @pbd: parse BD to be updated
3580 * @xmit_type: xmit flags
f2e0899f 3581 */
91226790
DK
3582static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3583 struct eth_tx_parse_bd_e1x *pbd,
3584 u32 xmit_type)
f2e0899f 3585{
e39aece7 3586 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3587
3588 /* for now NS flag is not used in Linux */
3589 pbd->global_data =
86564c3f
YM
3590 cpu_to_le16(hlen |
3591 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3592 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3593
3594 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3595 skb_network_header(skb)) >> 1;
f2e0899f 3596
e39aece7
VZ
3597 hlen += pbd->ip_hlen_w;
3598
3599 /* We support checksum offload for TCP and UDP only */
3600 if (xmit_type & XMIT_CSUM_TCP)
3601 hlen += tcp_hdrlen(skb) / 2;
3602 else
3603 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3604
3605 pbd->total_hlen_w = cpu_to_le16(hlen);
3606 hlen = hlen*2;
3607
3608 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3609 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3610
3611 } else {
3612 s8 fix = SKB_CS_OFF(skb); /* signed! */
3613
3614 DP(NETIF_MSG_TX_QUEUED,
3615 "hlen %d fix %d csum before fix %x\n",
3616 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3617
3618 /* HW bug: fixup the CSUM */
3619 pbd->tcp_pseudo_csum =
3620 bnx2x_csum_fix(skb_transport_header(skb),
3621 SKB_CS(skb), fix);
3622
3623 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3624 pbd->tcp_pseudo_csum);
3625 }
3626
3627 return hlen;
3628}
f85582f8 3629
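/* Illustrative sketch (not driver code): the E1x parse BD above accumulates
 * all header lengths in 16-bit words and only converts back to bytes for the
 * return value. The #if 0 example walks the arithmetic for an untagged
 * TCPv4 frame with no options (the sizes are assumptions).
 */
#if 0
#include <stdio.h>

int main(void)
{
	int eth_hlen = 14;	/* no VLAN tag          */
	int ip_hlen  = 20;	/* IPv4 without options */
	int tcp_hlen = 20;	/* TCP without options  */
	int hlen_w   = eth_hlen / 2 + ip_hlen / 2 + tcp_hlen / 2;

	printf("total_hlen_w = %d words (%d bytes returned)\n",
	       hlen_w, hlen_w * 2);
	return 0;
}
#endif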
a848ade4
DK
3630static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3631 struct eth_tx_parse_bd_e2 *pbd_e2,
3632 struct eth_tx_parse_2nd_bd *pbd2,
3633 u16 *global_data,
3634 u32 xmit_type)
3635{
e287a75c 3636 u16 hlen_w = 0;
a848ade4 3637 u8 outerip_off, outerip_len = 0;
e768fb29 3638
e287a75c
DK
3639 /* from outer IP to transport */
3640 hlen_w = (skb_inner_transport_header(skb) -
3641 skb_network_header(skb)) >> 1;
a848ade4
DK
3642
3643 /* transport len */
e768fb29 3644 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3645
e287a75c 3646 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3647
e768fb29
DK
3648 /* outer IP header info */
3649 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3650 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3651 u32 csum = (__force u32)(~iph->check) -
3652 (__force u32)iph->tot_len -
3653 (__force u32)iph->frag_off;
c957d09f 3654
e42780b6
DK
3655 outerip_len = iph->ihl << 1;
3656
a848ade4 3657 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3658 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3659 } else {
3660 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3661 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
e42780b6 3662 pbd_e2->data.tunnel_data.flags |=
28311f8e 3663 ETH_TUNNEL_DATA_IPV6_OUTER;
a848ade4
DK
3664 }
3665
3666 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3667
3668 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3669
e42780b6
DK
3670 /* inner IP header info */
3671 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3672 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3673
3674 pbd_e2->data.tunnel_data.pseudo_csum =
3675 bswab16(~csum_tcpudp_magic(
3676 inner_ip_hdr(skb)->saddr,
3677 inner_ip_hdr(skb)->daddr,
3678 0, IPPROTO_TCP, 0));
a848ade4
DK
3679 } else {
3680 pbd_e2->data.tunnel_data.pseudo_csum =
3681 bswab16(~csum_ipv6_magic(
3682 &inner_ipv6_hdr(skb)->saddr,
3683 &inner_ipv6_hdr(skb)->daddr,
3684 0, IPPROTO_TCP, 0));
3685 }
3686
3687 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3688
3689 *global_data |=
3690 outerip_off |
a848ade4
DK
3691 (outerip_len <<
3692 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3693 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3694 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3695
3696 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3697 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3698 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3699 }
a848ade4
DK
3700}
3701
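/* Illustrative sketch (not driver code): for a tunnelled LSO frame the code
 * above hands the FW the outer IPv4 header checksum with tot_len and
 * frag_off backed out, so the FW can patch those two fields per segment.
 * The #if 0 example reproduces that one's-complement adjustment on a
 * made-up header and cross-checks it against a direct recomputation.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum of n 16-bit words. */
static uint16_t ocsum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return fold16(sum);
}

int main(void)
{
	/* Made-up IPv4 header as ten words: word 1 is tot_len, word 3 is
	 * frag_off, word 5 (the checksum slot) is left zero while summing.
	 */
	uint16_t hdr[10] = { 0x4500, 0x05dc, 0x1234, 0x4000, 0x4011,
			     0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x0002 };
	uint16_t hdr0[10];
	uint16_t check, wo_len;
	int i;

	check = (uint16_t)~ocsum16(hdr, 10);	/* normal header checksum */

	/* Back tot_len and frag_off out of the checksum. */
	wo_len = (uint16_t)~fold16((uint32_t)(uint16_t)~check +
				   (0xffffu - hdr[1]) + (0xffffu - hdr[3]));

	/* Cross-check against a checksum computed with the fields zeroed. */
	for (i = 0; i < 10; i++)
		hdr0[i] = hdr[i];
	hdr0[1] = 0;
	hdr0[3] = 0;
	printf("%s\n",
	       wo_len == (uint16_t)~ocsum16(hdr0, 10) ? "match" : "mismatch");
	return 0;
}
#endif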
e42780b6
DK
3702static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3703 u32 xmit_type)
3704{
3705 struct ipv6hdr *ipv6;
3706
3707 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3708 return;
3709
3710 if (xmit_type & XMIT_GSO_ENC_V6)
3711 ipv6 = inner_ipv6_hdr(skb);
3712 else /* XMIT_GSO_V6 */
3713 ipv6 = ipv6_hdr(skb);
3714
3715 if (ipv6->nexthdr == NEXTHDR_IPV6)
3716 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3717}
3718
9f6c9258
DK
3719/* called with netif_tx_lock
3720 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3721 * netif_wake_queue()
3722 */
3723netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3724{
3725 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3726
9f6c9258 3727 struct netdev_queue *txq;
6383c0b3 3728 struct bnx2x_fp_txdata *txdata;
9f6c9258 3729 struct sw_tx_bd *tx_buf;
619c5cb6 3730 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3731 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3732 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3733 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3734 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3735 u32 pbd_e2_parsing_data = 0;
9f6c9258 3736 u16 pkt_prod, bd_prod;
65565884 3737 int nbd, txq_index;
9f6c9258
DK
3738 dma_addr_t mapping;
3739 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3740 int i;
3741 u8 hlen = 0;
3742 __le16 pkt_size = 0;
3743 struct ethhdr *eth;
3744 u8 mac_type = UNICAST_ADDRESS;
3745
3746#ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp->panic))
3748 return NETDEV_TX_BUSY;
3749#endif
3750
6383c0b3
AE
3751 txq_index = skb_get_queue_mapping(skb);
3752 txq = netdev_get_tx_queue(dev, txq_index);
3753
55c11941 3754 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3755
65565884 3756 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3757
3758 /* enable this debug print to view the transmission queue being used
51c1a580 3759 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3760 txq_index, fp_index, txdata_index); */
9f6c9258 3761
16a5fd92 3762 /* enable this debug print to view the transmission details
51c1a580
MS
3763 DP(NETIF_MSG_TX_QUEUED,
3764 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3765 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3766
6383c0b3 3767 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3768 skb_shinfo(skb)->nr_frags +
3769 BDS_PER_TX_PKT +
3770 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3771 /* Handle special storage cases separately */
c96bdc0c
DK
3772 if (txdata->tx_ring_size == 0) {
3773 struct bnx2x_eth_q_stats *q_stats =
3774 bnx2x_fp_qstats(bp, txdata->parent_fp);
3775 q_stats->driver_filtered_tx_pkt++;
3776 dev_kfree_skb(skb);
3777 return NETDEV_TX_OK;
3778 }
2de67439
YM
3779 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3780 netif_tx_stop_queue(txq);
c96bdc0c 3781 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3782
9f6c9258
DK
3783 return NETDEV_TX_BUSY;
3784 }
3785
51c1a580 3786 DP(NETIF_MSG_TX_QUEUED,
04c46736 3787 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3788 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3789 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3790 skb->len);
9f6c9258
DK
3791
3792 eth = (struct ethhdr *)skb->data;
3793
 3794 /* set flag according to packet type (UNICAST_ADDRESS is default) */
3795 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3796 if (is_broadcast_ether_addr(eth->h_dest))
3797 mac_type = BROADCAST_ADDRESS;
3798 else
3799 mac_type = MULTICAST_ADDRESS;
3800 }
3801
91226790 3802#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3803 /* First, check if we need to linearize the skb (due to FW
3804 restrictions). No need to check fragmentation if page size > 8K
3805 (there will be no violation to FW restrictions) */
3806 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3807 /* Statistics of linearization */
3808 bp->lin_cnt++;
3809 if (skb_linearize(skb) != 0) {
51c1a580
MS
3810 DP(NETIF_MSG_TX_QUEUED,
3811 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3812 dev_kfree_skb_any(skb);
3813 return NETDEV_TX_OK;
3814 }
3815 }
3816#endif
619c5cb6
VZ
3817 /* Map skb linear data for DMA */
3818 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3819 skb_headlen(skb), DMA_TO_DEVICE);
3820 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3823 dev_kfree_skb_any(skb);
3824 return NETDEV_TX_OK;
3825 }
9f6c9258
DK
3826 /*
3827 Please read carefully. First we use one BD which we mark as start,
3828 then we have a parsing info BD (used for TSO or xsum),
3829 and only then we have the rest of the TSO BDs.
3830 (don't forget to mark the last one as last,
3831 and to unmap only AFTER you write to the BD ...)
 3832 And above all, all PBD sizes are in words - NOT DWORDS!
3833 */
3834
619c5cb6
VZ
3835 /* get current pkt produced now - advance it just before sending packet
3836 * since mapping of pages may fail and cause packet to be dropped
3837 */
6383c0b3
AE
3838 pkt_prod = txdata->tx_pkt_prod;
3839 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3840
619c5cb6
VZ
3841 /* get a tx_buf and first BD
3842 * tx_start_bd may be changed during SPLIT,
3843 * but first_bd will always stay first
3844 */
6383c0b3
AE
3845 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3846 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3847 first_bd = tx_start_bd;
9f6c9258
DK
3848
3849 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3850
eeed018c
MK
3851 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3852 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3853 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3854 } else if (bp->ptp_tx_skb) {
3855 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3856 } else {
3857 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3858 /* schedule check for Tx timestamp */
3859 bp->ptp_tx_skb = skb_get(skb);
3860 bp->ptp_tx_start = jiffies;
3861 schedule_work(&bp->ptp_task);
3862 }
3863 }
3864
91226790
DK
3865 /* header nbd: indirectly zero other flags! */
3866 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3867
3868 /* remember the first BD of the packet */
6383c0b3 3869 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3870 tx_buf->skb = skb;
3871 tx_buf->flags = 0;
3872
3873 DP(NETIF_MSG_TX_QUEUED,
3874 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3875 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3876
df8a39de 3877 if (skb_vlan_tag_present(skb)) {
523224a3 3878 tx_start_bd->vlan_or_ethertype =
df8a39de 3879 cpu_to_le16(skb_vlan_tag_get(skb));
523224a3
DK
3880 tx_start_bd->bd_flags.as_bitfield |=
3881 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3882 } else {
3883 /* when transmitting in a vf, start bd must hold the ethertype
3884 * for fw to enforce it
3885 */
92f85f05 3886 u16 vlan_tci = 0;
ea36475a 3887#ifndef BNX2X_STOP_ON_ERROR
92f85f05 3888 if (IS_VF(bp)) {
ea36475a 3889#endif
92f85f05
MY
3890 /* Still need to consider inband vlan for enforced */
3891 if (__vlan_get_tag(skb, &vlan_tci)) {
3892 tx_start_bd->vlan_or_ethertype =
3893 cpu_to_le16(ntohs(eth->h_proto));
3894 } else {
3895 tx_start_bd->bd_flags.as_bitfield |=
3896 (X_ETH_INBAND_VLAN <<
3897 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3898 tx_start_bd->vlan_or_ethertype =
3899 cpu_to_le16(vlan_tci);
3900 }
ea36475a 3901#ifndef BNX2X_STOP_ON_ERROR
92f85f05 3902 } else {
dc1ba591
AE
3903 /* used by FW for packet accounting */
3904 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
92f85f05 3905 }
ea36475a 3906#endif
dc1ba591 3907 }
9f6c9258 3908
91226790
DK
3909 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3910
9f6c9258
DK
3911 /* turn on parsing and get a BD */
3912 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3913
93ef5c02
DK
3914 if (xmit_type & XMIT_CSUM)
3915 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3916
619c5cb6 3917 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3918 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3919 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3920
3921 if (xmit_type & XMIT_CSUM_ENC) {
3922 u16 global_data = 0;
3923
3924 /* Set PBD in enc checksum offload case */
3925 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3926 &pbd_e2_parsing_data,
3927 xmit_type);
3928
3929 /* turn on 2nd parsing and get a BD */
3930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3931
3932 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3933
3934 memset(pbd2, 0, sizeof(*pbd2));
3935
3936 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3937 (skb_inner_network_header(skb) -
3938 skb->data) >> 1;
3939
3940 if (xmit_type & XMIT_GSO_ENC)
3941 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3942 &global_data,
3943 xmit_type);
3944
3945 pbd2->global_data = cpu_to_le16(global_data);
3946
3947 /* add addition parse BD indication to start BD */
3948 SET_FLAG(tx_start_bd->general_data,
3949 ETH_TX_START_BD_PARSE_NBDS, 1);
3950 /* set encapsulation flag in start BD */
3951 SET_FLAG(tx_start_bd->general_data,
3952 ETH_TX_START_BD_TUNNEL_EXIST, 1);
fe26566d
DK
3953
3954 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3955
a848ade4
DK
3956 nbd++;
3957 } else if (xmit_type & XMIT_CSUM) {
91226790 3958 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3959 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3960 &pbd_e2_parsing_data,
3961 xmit_type);
a848ade4 3962 }
dc1ba591 3963
e42780b6 3964 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
babe723d
YM
3965 /* Add the macs to the parsing BD if this is a vf or if
3966 * Tx Switching is enabled.
3967 */
91226790
DK
3968 if (IS_VF(bp)) {
3969 /* override GRE parameters in BD */
3970 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3971 &pbd_e2->data.mac_addr.src_mid,
3972 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3973 eth->h_source);
91226790 3974
babe723d
YM
3975 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3976 &pbd_e2->data.mac_addr.dst_mid,
3977 &pbd_e2->data.mac_addr.dst_lo,
3978 eth->h_dest);
ea36475a
YM
3979 } else {
3980 if (bp->flags & TX_SWITCHING)
3981 bnx2x_set_fw_mac_addr(
3982 &pbd_e2->data.mac_addr.dst_hi,
3983 &pbd_e2->data.mac_addr.dst_mid,
3984 &pbd_e2->data.mac_addr.dst_lo,
3985 eth->h_dest);
3986#ifdef BNX2X_STOP_ON_ERROR
3987 /* Enforce security is always set in Stop on Error -
3988 * source mac should be present in the parsing BD
3989 */
3990 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3991 &pbd_e2->data.mac_addr.src_mid,
3992 &pbd_e2->data.mac_addr.src_lo,
3993 eth->h_source);
3994#endif
619c5cb6 3995 }
96bed4b9
YM
3996
3997 SET_FLAG(pbd_e2_parsing_data,
3998 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3999 } else {
96bed4b9 4000 u16 global_data = 0;
6383c0b3 4001 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
4002 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4003 /* Set PBD in checksum offload case */
4004 if (xmit_type & XMIT_CSUM)
4005 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 4006
96bed4b9
YM
4007 SET_FLAG(global_data,
4008 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4009 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
4010 }
4011
f85582f8 4012 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
4013 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4014 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
4015 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4016 pkt_size = tx_start_bd->nbytes;
4017
51c1a580 4018 DP(NETIF_MSG_TX_QUEUED,
91226790 4019 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 4020 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 4021 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
4022 tx_start_bd->bd_flags.as_bitfield,
4023 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
4024
4025 if (xmit_type & XMIT_GSO) {
4026
4027 DP(NETIF_MSG_TX_QUEUED,
4028 "TSO packet len %d hlen %d total len %d tso size %d\n",
4029 skb->len, hlen, skb_headlen(skb),
4030 skb_shinfo(skb)->gso_size);
4031
4032 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4033
91226790
DK
4034 if (unlikely(skb_headlen(skb) > hlen)) {
4035 nbd++;
6383c0b3
AE
4036 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4037 &tx_start_bd, hlen,
91226790
DK
4038 bd_prod);
4039 }
619c5cb6 4040 if (!CHIP_IS_E1x(bp))
e42780b6
DK
4041 pbd_e2_parsing_data |=
4042 (skb_shinfo(skb)->gso_size <<
4043 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4044 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f 4045 else
e42780b6 4046 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 4047 }
2297a2da
VZ
4048
4049 /* Set the PBD's parsing_data field if not zero
4050 * (for the chips newer than 57711).
4051 */
4052 if (pbd_e2_parsing_data)
4053 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4054
9f6c9258
DK
4055 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4056
f85582f8 4057 /* Handle fragmented skb */
9f6c9258
DK
4058 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4059 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4060
9e903e08
ED
4061 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4062 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 4063 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 4064 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 4065
51c1a580
MS
4066 DP(NETIF_MSG_TX_QUEUED,
4067 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
4068
 4069 /* we need to unmap all buffers already mapped
 4070 * for this SKB;
 4071 * first_bd->nbd needs to be properly updated
 4072 * before the call to bnx2x_free_tx_pkt
4073 */
4074 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 4075 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
4076 TX_BD(txdata->tx_pkt_prod),
4077 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4078 return NETDEV_TX_OK;
4079 }
4080
9f6c9258 4081 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4082 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4083 if (total_pkt_bd == NULL)
6383c0b3 4084 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4085
9f6c9258
DK
4086 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4087 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4088 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4089 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4090 nbd++;
9f6c9258
DK
4091
4092 DP(NETIF_MSG_TX_QUEUED,
4093 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4094 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4095 le16_to_cpu(tx_data_bd->nbytes));
4096 }
4097
4098 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4099
619c5cb6
VZ
4100 /* update with actual num BDs */
4101 first_bd->nbd = cpu_to_le16(nbd);
4102
9f6c9258
DK
4103 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4104
4105 /* now send a tx doorbell, counting the next BD
4106 * if the packet contains or ends with it
4107 */
4108 if (TX_BD_POFF(bd_prod) < nbd)
4109 nbd++;
4110
619c5cb6
VZ
4111 /* total_pkt_bytes should be set on the first data BD if
4112 * it's not an LSO packet and there is more than one
4113 * data BD. In this case pkt_size is limited by an MTU value.
4114 * However we prefer to set it for an LSO packet (while we don't
 4115 * have to) in order to save some CPU cycles in the non-LSO
 4116 * case, which we care about much more.
4117 */
9f6c9258
DK
4118 if (total_pkt_bd != NULL)
4119 total_pkt_bd->total_pkt_bytes = pkt_size;
4120
523224a3 4121 if (pbd_e1x)
9f6c9258 4122 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4124 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4125 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4126 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4127 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4128 if (pbd_e2)
4129 DP(NETIF_MSG_TX_QUEUED,
4130 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4131 pbd_e2,
4132 pbd_e2->data.mac_addr.dst_hi,
4133 pbd_e2->data.mac_addr.dst_mid,
4134 pbd_e2->data.mac_addr.dst_lo,
4135 pbd_e2->data.mac_addr.src_hi,
4136 pbd_e2->data.mac_addr.src_mid,
4137 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4138 pbd_e2->parsing_data);
9f6c9258
DK
4139 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4140
2df1a70a
TH
4141 netdev_tx_sent_queue(txq, skb->len);
4142
8373c57d
WB
4143 skb_tx_timestamp(skb);
4144
6383c0b3 4145 txdata->tx_pkt_prod++;
9f6c9258
DK
4146 /*
4147 * Make sure that the BD data is updated before updating the producer
4148 * since FW might read the BD right after the producer is updated.
4149 * This is only applicable for weak-ordered memory model archs such
4150 * as IA-64. The following barrier is also mandatory since FW will
 4151 * assume packets must have BDs.
4152 */
4153 wmb();
4154
6383c0b3 4155 txdata->tx_db.data.prod += nbd;
edd87423
SK
4156 /* make sure descriptor update is observed by HW */
4157 wmb();
f85582f8 4158
7f883c77 4159 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4160
4161 mmiowb();
4162
6383c0b3 4163 txdata->tx_bd_prod += nbd;
9f6c9258 4164
7df2dc6b 4165 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4166 netif_tx_stop_queue(txq);
4167
4168 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4169 * ordering of set_bit() in netif_tx_stop_queue() and read of
4170 * fp->bd_tx_cons */
4171 smp_mb();
4172
15192a8c 4173 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4174 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4175 netif_tx_wake_queue(txq);
4176 }
6383c0b3 4177 txdata->tx_pkt++;
9f6c9258
DK
4178
4179 return NETDEV_TX_OK;
4180}
f85582f8 4181
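/* Illustrative sketch (not driver code): the producer arithmetic used in
 * bnx2x_start_xmit() steps over the last descriptor of every BD page, which
 * is reserved for the "next page" pointer BD. The #if 0 example models that
 * stepping; DESC_PER_PAGE is an assumed stand-in, not the driver's constant.
 */
#if 0
#include <stdio.h>

#define DESC_PER_PAGE	256			/* assumed descriptors per page */
#define LAST_USABLE	(DESC_PER_PAGE - 2)	/* last data BD slot on a page  */

static unsigned int next_tx_idx(unsigned int idx)
{
	/* Skip the link BD that chains this page to the next one. */
	return (idx % DESC_PER_PAGE) == LAST_USABLE ? idx + 2 : idx + 1;
}

int main(void)
{
	unsigned int prod = DESC_PER_PAGE - 4;
	int i;

	for (i = 0; i < 5; i++) {
		printf("bd_prod = %u\n", prod);
		prod = next_tx_idx(prod);
	}
	return 0;
}
#endif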
230d00eb
YM
4182void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4183{
4184 int mfw_vn = BP_FW_MB_IDX(bp);
4185 u32 tmp;
4186
 4187 /* If the shmem shouldn't affect configuration, use an identity map */
4188 if (!IS_MF_BD(bp)) {
4189 int i;
4190
4191 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4192 c2s_map[i] = i;
4193 *c2s_default = 0;
4194
4195 return;
4196 }
4197
4198 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4199 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4200 c2s_map[0] = tmp & 0xff;
4201 c2s_map[1] = (tmp >> 8) & 0xff;
4202 c2s_map[2] = (tmp >> 16) & 0xff;
4203 c2s_map[3] = (tmp >> 24) & 0xff;
4204
4205 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4206 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4207 c2s_map[4] = tmp & 0xff;
4208 c2s_map[5] = (tmp >> 8) & 0xff;
4209 c2s_map[6] = (tmp >> 16) & 0xff;
4210 c2s_map[7] = (tmp >> 24) & 0xff;
4211
4212 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4213 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4214 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4215}
4216
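/* Illustrative sketch (not driver code): bnx2x_get_c2s_mapping() above
 * unpacks eight priority-to-cos bytes from two 32-bit shmem words. The
 * #if 0 example performs the same byte extraction on assumed values that
 * are already in host byte order.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lower = 0x03020100;	/* priorities 0..3 (assumed) */
	uint32_t upper = 0x07060504;	/* priorities 4..7 (assumed) */
	uint8_t c2s_map[8];
	int i;

	for (i = 0; i < 4; i++) {
		c2s_map[i]     = (lower >> (8 * i)) & 0xff;
		c2s_map[i + 4] = (upper >> (8 * i)) & 0xff;
	}

	for (i = 0; i < 8; i++)
		printf("prio %d -> cos %u\n", i, c2s_map[i]);
	return 0;
}
#endif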
6383c0b3
AE
4217/**
4218 * bnx2x_setup_tc - routine to configure net_device for multi tc
4219 *
4220 * @netdev: net device to configure
4221 * @tc: number of traffic classes to enable
4222 *
4223 * callback connected to the ndo_setup_tc function pointer
4224 */
4225int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4226{
6383c0b3 4227 struct bnx2x *bp = netdev_priv(dev);
230d00eb
YM
4228 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4229 int cos, prio, count, offset;
6383c0b3
AE
4230
4231 /* setup tc must be called under rtnl lock */
4232 ASSERT_RTNL();
4233
16a5fd92 4234 /* no traffic classes requested. Aborting */
6383c0b3
AE
4235 if (!num_tc) {
4236 netdev_reset_tc(dev);
4237 return 0;
4238 }
4239
4240 /* requested to support too many traffic classes */
4241 if (num_tc > bp->max_cos) {
6bf07b8e 4242 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4243 num_tc, bp->max_cos);
6383c0b3
AE
4244 return -EINVAL;
4245 }
4246
4247 /* declare amount of supported traffic classes */
4248 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4249 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4250 return -EINVAL;
4251 }
4252
230d00eb
YM
4253 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4254
6383c0b3
AE
4255 /* configure priority to traffic class mapping */
4256 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
230d00eb
YM
4257 int outer_prio = c2s_map[prio];
4258
4259 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
51c1a580
MS
4260 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4261 "mapping priority %d to tc %d\n",
230d00eb 4262 outer_prio, bp->prio_to_cos[outer_prio]);
6383c0b3
AE
4263 }
4264
16a5fd92 4265 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4266 This can be used for ets or pfc, and save the effort of setting
 4267 up a multi-class queue disc or negotiating DCBX with a switch
4268 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4269 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4270 for (prio = 1; prio < 16; prio++) {
4271 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4272 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4273 } */
4274
4275 /* configure traffic class to transmission queue mapping */
4276 for (cos = 0; cos < bp->max_cos; cos++) {
4277 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4278 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4279 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4280 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4281 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4282 cos, offset, count);
4283 }
4284
4285 return 0;
4286}
4287
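/* Illustrative sketch (not driver code): bnx2x_setup_tc() above gives each
 * traffic class a contiguous block of Tx queues via netdev_set_tc_queue().
 * The #if 0 example prints the count/offset pairs for assumed queue and
 * class counts.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int num_eth_queues = 4;		/* assumed RSS queue count   */
	int max_cos = 3;		/* assumed number of classes */
	int cos;

	for (cos = 0; cos < max_cos; cos++)
		printf("tc %d -> %d queues starting at offset %d\n",
		       cos, num_eth_queues, cos * num_eth_queues);
	return 0;
}
#endif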
2572ac53 4288int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 4289 void *type_data)
e4c6734e 4290{
de4784ca
JP
4291 struct tc_mqprio_qopt *mqprio = type_data;
4292
575ed7d3 4293 if (type != TC_SETUP_QDISC_MQPRIO)
38cf0426 4294 return -EOPNOTSUPP;
56f36acd 4295
de4784ca 4296 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 4297
de4784ca 4298 return bnx2x_setup_tc(dev, mqprio->num_tc);
e4c6734e
JF
4299}
4300
9f6c9258
DK
4301/* called with rtnl_lock */
4302int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4303{
4304 struct sockaddr *addr = p;
4305 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4306 int rc = 0;
9f6c9258 4307
2e98ffc2 4308 if (!is_valid_ether_addr(addr->sa_data)) {
51c1a580 4309 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4310 return -EINVAL;
51c1a580 4311 }
614c76df 4312
2e98ffc2
DK
4313 if (IS_MF_STORAGE_ONLY(bp)) {
4314 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
9f6c9258 4315 return -EINVAL;
51c1a580 4316 }
9f6c9258 4317
619c5cb6
VZ
4318 if (netif_running(dev)) {
4319 rc = bnx2x_set_eth_mac(bp, false);
4320 if (rc)
4321 return rc;
4322 }
4323
9f6c9258 4324 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4325
523224a3 4326 if (netif_running(dev))
619c5cb6 4327 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4328
230d00eb
YM
4329 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4330 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4331
619c5cb6 4332 return rc;
9f6c9258
DK
4333}
4334
b3b83c3f
DK
4335static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4336{
4337 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4338 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4339 u8 cos;
b3b83c3f
DK
4340
4341 /* Common */
55c11941 4342
b3b83c3f
DK
4343 if (IS_FCOE_IDX(fp_index)) {
4344 memset(sb, 0, sizeof(union host_hc_status_block));
4345 fp->status_blk_mapping = 0;
b3b83c3f 4346 } else {
b3b83c3f 4347 /* status blocks */
619c5cb6 4348 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4349 BNX2X_PCI_FREE(sb->e2_sb,
4350 bnx2x_fp(bp, fp_index,
4351 status_blk_mapping),
4352 sizeof(struct host_hc_status_block_e2));
4353 else
4354 BNX2X_PCI_FREE(sb->e1x_sb,
4355 bnx2x_fp(bp, fp_index,
4356 status_blk_mapping),
4357 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4358 }
55c11941 4359
b3b83c3f
DK
4360 /* Rx */
4361 if (!skip_rx_queue(bp, fp_index)) {
4362 bnx2x_free_rx_bds(fp);
4363
4364 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4365 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4366 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4367 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4368 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4369
4370 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4371 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4372 sizeof(struct eth_fast_path_rx_cqe) *
4373 NUM_RCQ_BD);
4374
4375 /* SGE ring */
4376 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4377 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4378 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4379 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4380 }
4381
4382 /* Tx */
4383 if (!skip_tx_queue(bp, fp_index)) {
4384 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4385 for_each_cos_in_tx_queue(fp, cos) {
65565884 4386 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4387
51c1a580 4388 DP(NETIF_MSG_IFDOWN,
94f05b0f 4389 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4390 fp_index, cos, txdata->cid);
4391
4392 BNX2X_FREE(txdata->tx_buf_ring);
4393 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4394 txdata->tx_desc_mapping,
4395 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4396 }
b3b83c3f
DK
4397 }
4398 /* end of fastpath */
4399}
4400
a8f47eb7 4401static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4402{
4403 int i;
4404 for_each_cnic_queue(bp, i)
4405 bnx2x_free_fp_mem_at(bp, i);
4406}
4407
b3b83c3f
DK
4408void bnx2x_free_fp_mem(struct bnx2x *bp)
4409{
4410 int i;
55c11941 4411 for_each_eth_queue(bp, i)
b3b83c3f
DK
4412 bnx2x_free_fp_mem_at(bp, i);
4413}
4414
1191cb83 4415static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4416{
4417 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4418 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4419 bnx2x_fp(bp, index, sb_index_values) =
4420 (__le16 *)status_blk.e2_sb->sb.index_values;
4421 bnx2x_fp(bp, index, sb_running_index) =
4422 (__le16 *)status_blk.e2_sb->sb.running_index;
4423 } else {
4424 bnx2x_fp(bp, index, sb_index_values) =
4425 (__le16 *)status_blk.e1x_sb->sb.index_values;
4426 bnx2x_fp(bp, index, sb_running_index) =
4427 (__le16 *)status_blk.e1x_sb->sb.running_index;
4428 }
4429}
4430
1191cb83
ED
4431/* Returns the number of actually allocated BDs */
4432static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4433 int rx_ring_size)
4434{
4435 struct bnx2x *bp = fp->bp;
4436 u16 ring_prod, cqe_ring_prod;
4437 int i, failure_cnt = 0;
4438
4439 fp->rx_comp_cons = 0;
4440 cqe_ring_prod = ring_prod = 0;
4441
 4442 /* This routine is called only during init, so
4443 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4444 */
4445 for (i = 0; i < rx_ring_size; i++) {
996dedba 4446 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4447 failure_cnt++;
4448 continue;
4449 }
4450 ring_prod = NEXT_RX_IDX(ring_prod);
4451 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4452 WARN_ON(ring_prod <= (i - failure_cnt));
4453 }
4454
4455 if (failure_cnt)
4456 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4457 i - failure_cnt, fp->index);
4458
4459 fp->rx_bd_prod = ring_prod;
4460 /* Limit the CQE producer by the CQE ring size */
4461 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4462 cqe_ring_prod);
1191cb83 4463
15192a8c 4464 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4465
4466 return i - failure_cnt;
4467}
4468
4469static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4470{
4471 int i;
4472
4473 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4474 struct eth_rx_cqe_next_page *nextpg;
4475
4476 nextpg = (struct eth_rx_cqe_next_page *)
4477 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4478 nextpg->addr_hi =
4479 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4480 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4481 nextpg->addr_lo =
4482 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4483 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4484 }
4485}
4486
b3b83c3f
DK
4487static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4488{
4489 union host_hc_status_block *sb;
4490 struct bnx2x_fastpath *fp = &bp->fp[index];
4491 int ring_size = 0;
6383c0b3 4492 u8 cos;
c2188952 4493 int rx_ring_size = 0;
b3b83c3f 4494
2e98ffc2 4495 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
614c76df
DK
4496 rx_ring_size = MIN_RX_SIZE_NONTPA;
4497 bp->rx_ring_size = rx_ring_size;
55c11941 4498 } else if (!bp->rx_ring_size) {
c2188952
VZ
4499 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4500
065f8b92
YM
4501 if (CHIP_IS_E3(bp)) {
4502 u32 cfg = SHMEM_RD(bp,
4503 dev_info.port_hw_config[BP_PORT(bp)].
4504 default_cfg);
4505
4506 /* Decrease ring size for 1G functions */
4507 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4508 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4509 rx_ring_size /= 10;
4510 }
d760fc37 4511
c2188952
VZ
4512 /* allocate at least number of buffers required by FW */
4513 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4514 MIN_RX_SIZE_TPA, rx_ring_size);
4515
4516 bp->rx_ring_size = rx_ring_size;
614c76df 4517 } else /* if rx_ring_size specified - use it */
c2188952 4518 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4519
04c46736
YM
4520 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4521
b3b83c3f
DK
4522 /* Common */
4523 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4524
b3b83c3f 4525 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4526 /* status blocks */
cd2b0389
JP
4527 if (!CHIP_IS_E1x(bp)) {
4528 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4529 sizeof(struct host_hc_status_block_e2));
4530 if (!sb->e2_sb)
4531 goto alloc_mem_err;
4532 } else {
4533 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4534 sizeof(struct host_hc_status_block_e1x));
4535 if (!sb->e1x_sb)
4536 goto alloc_mem_err;
4537 }
b3b83c3f 4538 }
8eef2af1
DK
4539
4540 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4541 * set shortcuts for it.
4542 */
4543 if (!IS_FCOE_IDX(index))
4544 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4545
4546 /* Tx */
4547 if (!skip_tx_queue(bp, index)) {
4548 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4549 for_each_cos_in_tx_queue(fp, cos) {
65565884 4550 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4551
51c1a580
MS
4552 DP(NETIF_MSG_IFUP,
4553 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4554 index, cos);
4555
cd2b0389
JP
4556 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4557 sizeof(struct sw_tx_bd),
4558 GFP_KERNEL);
4559 if (!txdata->tx_buf_ring)
4560 goto alloc_mem_err;
4561 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4562 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4563 if (!txdata->tx_desc_ring)
4564 goto alloc_mem_err;
6383c0b3 4565 }
b3b83c3f
DK
4566 }
4567
4568 /* Rx */
4569 if (!skip_rx_queue(bp, index)) {
4570 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4571 bnx2x_fp(bp, index, rx_buf_ring) =
4572 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4573 if (!bnx2x_fp(bp, index, rx_buf_ring))
4574 goto alloc_mem_err;
4575 bnx2x_fp(bp, index, rx_desc_ring) =
4576 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4577 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4578 if (!bnx2x_fp(bp, index, rx_desc_ring))
4579 goto alloc_mem_err;
b3b83c3f 4580
75b29459 4581 /* Seed all CQEs by 1s */
cd2b0389
JP
4582 bnx2x_fp(bp, index, rx_comp_ring) =
4583 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4584 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4585 if (!bnx2x_fp(bp, index, rx_comp_ring))
4586 goto alloc_mem_err;
b3b83c3f
DK
4587
4588 /* SGE ring */
cd2b0389
JP
4589 bnx2x_fp(bp, index, rx_page_ring) =
4590 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4591 GFP_KERNEL);
4592 if (!bnx2x_fp(bp, index, rx_page_ring))
4593 goto alloc_mem_err;
4594 bnx2x_fp(bp, index, rx_sge_ring) =
4595 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4596 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4597 if (!bnx2x_fp(bp, index, rx_sge_ring))
4598 goto alloc_mem_err;
b3b83c3f
DK
4599 /* RX BD ring */
4600 bnx2x_set_next_page_rx_bd(fp);
4601
4602 /* CQ ring */
4603 bnx2x_set_next_page_rx_cq(fp);
4604
4605 /* BDs */
4606 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4607 if (ring_size < rx_ring_size)
4608 goto alloc_mem_err;
4609 }
4610
4611 return 0;
4612
4613/* handles low memory cases */
4614alloc_mem_err:
4615 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4616 index, ring_size);
 4617 /* FW will drop all packets if the queue is not big enough.
 4618 * In these cases we disable the queue.
6383c0b3 4619 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f 4620 */
7e6b4d44 4621 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
eb722d7a 4622 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4623 /* release memory allocated for this queue */
4624 bnx2x_free_fp_mem_at(bp, index);
4625 return -ENOMEM;
4626 }
4627 return 0;
4628}
4629
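/* Illustrative sketch (not driver code): bnx2x_alloc_fp_mem_at() above
 * derives the Rx ring size from a global budget divided across the queues,
 * shrinks it for 1G SerDes ports, and clamps it to the FW minimum. The
 * #if 0 example replays that arithmetic with assumed stand-in constants.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int max_rx_avail = 8192;	/* stand-in for MAX_RX_AVAIL    */
	int min_rx_tpa   = 72;		/* stand-in for MIN_RX_SIZE_TPA */
	int num_queues   = 8;
	int rx_ring_size = max_rx_avail / num_queues;
	int is_1g_serdes = 1;		/* pretend this is an SGMII port */

	if (is_1g_serdes)
		rx_ring_size /= 10;
	if (rx_ring_size < min_rx_tpa)
		rx_ring_size = min_rx_tpa;

	printf("rx_ring_size = %d\n", rx_ring_size);
	return 0;
}
#endif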
a8f47eb7 4630static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4631{
4632 if (!NO_FCOE(bp))
4633 /* FCoE */
4634 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4635 /* we will fail the load process instead of marking
4636 * NO_FCOE_FLAG
4637 */
4638 return -ENOMEM;
4639
4640 return 0;
4641}
4642
a8f47eb7 4643static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4644{
4645 int i;
4646
55c11941
MS
4647 /* 1. Allocate FP for leading - fatal if error
4648 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4649 */
4650
4651 /* leading */
4652 if (bnx2x_alloc_fp_mem_at(bp, 0))
4653 return -ENOMEM;
6383c0b3 4654
b3b83c3f
DK
4655 /* RSS */
4656 for_each_nondefault_eth_queue(bp, i)
4657 if (bnx2x_alloc_fp_mem_at(bp, i))
4658 break;
4659
4660 /* handle memory failures */
4661 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4662 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4663
4664 WARN_ON(delta < 0);
4864a16a 4665 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4666 if (CNIC_SUPPORT(bp))
4667 /* move non eth FPs next to last eth FP
4668 * must be done in that order
4669 * FCOE_IDX < FWD_IDX < OOO_IDX
4670 */
b3b83c3f 4671
55c11941
MS
4672 /* move FCoE fp even NO_FCOE_FLAG is on */
4673 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4674 bp->num_ethernet_queues -= delta;
4675 bp->num_queues = bp->num_ethernet_queues +
4676 bp->num_cnic_queues;
b3b83c3f
DK
4677 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4678 bp->num_queues + delta, bp->num_queues);
4679 }
4680
4681 return 0;
4682}
d6214d7a 4683
523224a3
DK
4684void bnx2x_free_mem_bp(struct bnx2x *bp)
4685{
c3146eb6
DK
4686 int i;
4687
4688 for (i = 0; i < bp->fp_array_size; i++)
4689 kfree(bp->fp[i].tpa_info);
523224a3 4690 kfree(bp->fp);
15192a8c
BW
4691 kfree(bp->sp_objs);
4692 kfree(bp->fp_stats);
65565884 4693 kfree(bp->bnx2x_txq);
523224a3
DK
4694 kfree(bp->msix_table);
4695 kfree(bp->ilt);
4696}
4697
0329aba1 4698int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4699{
4700 struct bnx2x_fastpath *fp;
4701 struct msix_entry *tbl;
4702 struct bnx2x_ilt *ilt;
6383c0b3 4703 int msix_table_size = 0;
55c11941 4704 int fp_array_size, txq_array_size;
15192a8c 4705 int i;
6383c0b3
AE
4706
4707 /*
 4708 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4709 * path IGU SBs plus default SB (for PF only).
6383c0b3 4710 */
1ab4434c
AE
4711 msix_table_size = bp->igu_sb_cnt;
4712 if (IS_PF(bp))
4713 msix_table_size++;
4714 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4715
6383c0b3 4716 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4717 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4718 bp->fp_array_size = fp_array_size;
4719 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4720
c3146eb6 4721 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4722 if (!fp)
4723 goto alloc_err;
c3146eb6 4724 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4725 fp[i].tpa_info =
4726 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4727 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4728 if (!(fp[i].tpa_info))
4729 goto alloc_err;
4730 }
4731
523224a3
DK
4732 bp->fp = fp;
4733
15192a8c 4734 /* allocate sp objs */
c3146eb6 4735 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4736 GFP_KERNEL);
4737 if (!bp->sp_objs)
4738 goto alloc_err;
4739
4740 /* allocate fp_stats */
c3146eb6 4741 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4742 GFP_KERNEL);
4743 if (!bp->fp_stats)
4744 goto alloc_err;
4745
65565884 4746 /* Allocate memory for the transmission queues array */
55c11941
MS
4747 txq_array_size =
4748 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4749 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4750
4751 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4752 GFP_KERNEL);
65565884
MS
4753 if (!bp->bnx2x_txq)
4754 goto alloc_err;
4755
523224a3 4756 /* msix table */
01e23742 4757 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4758 if (!tbl)
4759 goto alloc_err;
4760 bp->msix_table = tbl;
4761
4762 /* ilt */
4763 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4764 if (!ilt)
4765 goto alloc_err;
4766 bp->ilt = ilt;
4767
4768 return 0;
4769alloc_err:
4770 bnx2x_free_mem_bp(bp);
4771 return -ENOMEM;
523224a3
DK
4772}
4773
a9fccec7 4774int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4775{
4776 struct bnx2x *bp = netdev_priv(dev);
4777
4778 if (unlikely(!netif_running(dev)))
4779 return 0;
4780
5d07d868 4781 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4782 return bnx2x_nic_load(bp, LOAD_NORMAL);
4783}
4784
1ac9e428
YR
4785int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4786{
4787 u32 sel_phy_idx = 0;
4788 if (bp->link_params.num_phys <= 1)
4789 return INT_PHY;
4790
4791 if (bp->link_vars.link_up) {
4792 sel_phy_idx = EXT_PHY1;
4793 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4794 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4795 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4796 sel_phy_idx = EXT_PHY2;
4797 } else {
4798
4799 switch (bnx2x_phy_selection(&bp->link_params)) {
4800 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4801 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4802 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4803 sel_phy_idx = EXT_PHY1;
4804 break;
4805 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4806 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4807 sel_phy_idx = EXT_PHY2;
4808 break;
4809 }
4810 }
4811
4812 return sel_phy_idx;
1ac9e428
YR
4813}
4814int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4815{
4816 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4817 /*
2de67439 4818 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4819 * swapping is enabled). So when swapping is enabled, we need to reverse
4820 * the configuration
4821 */
4822
4823 if (bp->link_params.multi_phy_config &
4824 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4825 if (sel_phy_idx == EXT_PHY1)
4826 sel_phy_idx = EXT_PHY2;
4827 else if (sel_phy_idx == EXT_PHY2)
4828 sel_phy_idx = EXT_PHY1;
4829 }
4830 return LINK_CONFIG_IDX(sel_phy_idx);
4831}
4832
55c11941 4833#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4834int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4835{
4836 struct bnx2x *bp = netdev_priv(dev);
4837 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4838
4839 switch (type) {
4840 case NETDEV_FCOE_WWNN:
4841 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4842 cp->fcoe_wwn_node_name_lo);
4843 break;
4844 case NETDEV_FCOE_WWPN:
4845 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4846 cp->fcoe_wwn_port_name_lo);
4847 break;
4848 default:
51c1a580 4849 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4850 return -EINVAL;
4851 }
4852
4853 return 0;
4854}
4855#endif
4856
9f6c9258
DK
4857/* called with rtnl_lock */
4858int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4859{
4860 struct bnx2x *bp = netdev_priv(dev);
9f6c9258 4861
0650c0b8
YM
4862 if (pci_num_vf(bp->pdev)) {
4863 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4864 return -EPERM;
4865 }
4866
9f6c9258 4867 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4868 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4869 return -EAGAIN;
4870 }
4871
9f6c9258
DK
4872 /* This does not race with packet allocation
4873 * because the actual alloc size is
4874 * only updated as part of load
4875 */
4876 dev->mtu = new_mtu;
4877
3c3def5f
MC
4878 if (!bnx2x_mtu_allows_gro(new_mtu))
4879 dev->features &= ~NETIF_F_GRO_HW;
4880
230d00eb
YM
4881 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4882 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4883
66371c44
MM
4884 return bnx2x_reload_if_running(dev);
4885}
4886
c8f44aff 4887netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4888 netdev_features_t features)
66371c44
MM
4889{
4890 struct bnx2x *bp = netdev_priv(dev);
4891
909d9faa
YM
4892 if (pci_num_vf(bp->pdev)) {
4893 netdev_features_t changed = dev->features ^ features;
4894
4895 /* Revert the requested changes in features if they
4896 * would require internal reload of PF in bnx2x_set_features().
4897 */
4898 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4899 features &= ~NETIF_F_RXCSUM;
4900 features |= dev->features & NETIF_F_RXCSUM;
4901 }
4902
4903 if (changed & NETIF_F_LOOPBACK) {
4904 features &= ~NETIF_F_LOOPBACK;
4905 features |= dev->features & NETIF_F_LOOPBACK;
4906 }
4907 }
4908
66371c44 4909 /* TPA requires Rx CSUM offloading */
3c3def5f
MC
4910 if (!(features & NETIF_F_RXCSUM))
4911 features &= ~NETIF_F_LRO;
4912
4913 if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4914 features &= ~NETIF_F_GRO_HW;
4915 if (features & NETIF_F_GRO_HW)
66371c44
MM
4916 features &= ~NETIF_F_LRO;
4917
4918 return features;
4919}
4920
c8f44aff 4921int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4922{
4923 struct bnx2x *bp = netdev_priv(dev);
f8dcb5e3 4924 netdev_features_t changes = features ^ dev->features;
538dd2e3 4925 bool bnx2x_reload = false;
f8dcb5e3 4926 int rc;
621b4d66 4927
909d9faa
YM
4928 /* VFs or non SRIOV PFs should be able to change loopback feature */
4929 if (!pci_num_vf(bp->pdev)) {
4930 if (features & NETIF_F_LOOPBACK) {
4931 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4932 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4933 bnx2x_reload = true;
4934 }
4935 } else {
4936 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4937 bp->link_params.loopback_mode = LOOPBACK_NONE;
4938 bnx2x_reload = true;
4939 }
538dd2e3
MB
4940 }
4941 }
4942
3c3def5f
MC
4943 /* Don't care about GRO changes */
4944 changes &= ~NETIF_F_GRO;
aebf6244 4945
8802f579 4946 if (changes)
538dd2e3 4947 bnx2x_reload = true;
8802f579 4948
538dd2e3 4949 if (bnx2x_reload) {
f8dcb5e3
MS
4950 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4951 dev->features = features;
4952 rc = bnx2x_reload_if_running(dev);
4953 return rc ? rc : 1;
4954 }
66371c44 4955 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4956 }
4957
66371c44 4958 return 0;
9f6c9258
DK
4959}
4960
4961void bnx2x_tx_timeout(struct net_device *dev)
4962{
4963 struct bnx2x *bp = netdev_priv(dev);
4964
1b40428c
SRK
 4965 /* We want the dump information logged,
4966 * but calling bnx2x_panic() would kill all chances of recovery.
4967 */
9f6c9258 4968 if (!bp->panic)
1b40428c
SRK
4969#ifndef BNX2X_STOP_ON_ERROR
4970 bnx2x_panic_dump(bp, false);
4971#else
9f6c9258
DK
4972 bnx2x_panic();
4973#endif
7be08a72 4974
9f6c9258 4975 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4976 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4977}
4978
9f6c9258
DK
4979int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4980{
4981 struct net_device *dev = pci_get_drvdata(pdev);
4982 struct bnx2x *bp;
4983
4984 if (!dev) {
4985 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4986 return -ENODEV;
4987 }
4988 bp = netdev_priv(dev);
4989
4990 rtnl_lock();
4991
4992 pci_save_state(pdev);
4993
4994 if (!netif_running(dev)) {
4995 rtnl_unlock();
4996 return 0;
4997 }
4998
4999 netif_device_detach(dev);
5000
5d07d868 5001 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
5002
5003 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5004
5005 rtnl_unlock();
5006
5007 return 0;
5008}
5009
5010int bnx2x_resume(struct pci_dev *pdev)
5011{
5012 struct net_device *dev = pci_get_drvdata(pdev);
5013 struct bnx2x *bp;
5014 int rc;
5015
5016 if (!dev) {
5017 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5018 return -ENODEV;
5019 }
5020 bp = netdev_priv(dev);
5021
5022 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 5023 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
5024 return -EAGAIN;
5025 }
5026
5027 rtnl_lock();
5028
5029 pci_restore_state(pdev);
5030
5031 if (!netif_running(dev)) {
5032 rtnl_unlock();
5033 return 0;
5034 }
5035
5036 bnx2x_set_power_state(bp, PCI_D0);
5037 netif_device_attach(dev);
5038
5039 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5040
5041 rtnl_unlock();
5042
5043 return rc;
5044}
619c5cb6 5045
619c5cb6
VZ
5046void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5047 u32 cid)
5048{
b9871bcf
AE
5049 if (!cxt) {
5050 BNX2X_ERR("bad context pointer %p\n", cxt);
5051 return;
5052 }
5053
619c5cb6
VZ
5054 /* ustorm cxt validation */
5055 cxt->ustorm_ag_context.cdu_usage =
5056 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5057 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5058 /* xcontext validation */
5059 cxt->xstorm_ag_context.cdu_reserved =
5060 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5061 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5062}
5063
1191cb83
ED
5064static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5065 u8 fw_sb_id, u8 sb_index,
5066 u8 ticks)
619c5cb6 5067{
619c5cb6
VZ
5068 u32 addr = BAR_CSTRORM_INTMEM +
5069 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5070 REG_WR8(bp, addr, ticks);
51c1a580
MS
5071 DP(NETIF_MSG_IFUP,
5072 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5073 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
5074}
5075
1191cb83
ED
5076static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5077 u16 fw_sb_id, u8 sb_index,
5078 u8 disable)
619c5cb6
VZ
5079{
5080 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5081 u32 addr = BAR_CSTRORM_INTMEM +
5082 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 5083 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
5084 /* clear and set */
5085 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5086 flags |= enable_flag;
0c14e5ce 5087 REG_WR8(bp, addr, flags);
51c1a580
MS
5088 DP(NETIF_MSG_IFUP,
5089 "port %x fw_sb_id %d sb_index %d disable %d\n",
5090 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
5091}
5092
5093void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5094 u8 sb_index, u8 disable, u16 usec)
5095{
5096 int port = BP_PORT(bp);
5097 u8 ticks = usec / BNX2X_BTR;
5098
5099 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5100
5101 disable = disable ? 1 : (usec ? 0 : 1);
5102 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5103}
230bb0f3
YM
5104
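/* Illustrative sketch (not driver code): bnx2x_update_coalesce_sb_index()
 * above converts a coalescing timeout in microseconds into HC ticks and
 * disables the index when the timeout is zero. The #if 0 example shows the
 * conversion; the tick resolution used here is an assumed stand-in for
 * BNX2X_BTR.
 */
#if 0
#include <stdio.h>

#define TICK_USEC	4	/* assumed stand-in for BNX2X_BTR */

int main(void)
{
	int usecs[] = { 0, 25, 100 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%3d usec -> %2d ticks, disable=%d\n",
		       usecs[i], usecs[i] / TICK_USEC, usecs[i] ? 0 : 1);
	return 0;
}
#endif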
5105void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5106 u32 verbose)
5107{
4e857c58 5108 smp_mb__before_atomic();
230bb0f3 5109 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 5110 smp_mb__after_atomic();
230bb0f3
YM
5111 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5112 flag);
5113 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5114}