/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

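/* Pick the queue count: the bnx2x_num_queues module parameter if set,
 * otherwise the kernel's default RSS queue count, clamped to what the
 * device supports (and forced to a single queue under kdump).
 */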
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp, if there is it's will
	 * always be 12 bytes length: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

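/* Allocate one page for the RX SGE ring at @index and DMA-map it towards
 * the device; the page is later unmapped and handed to the stack by
 * bnx2x_fill_frag_skb().
 */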
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

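/* For FW-aggregated (GRO/TPA) packets the TCP checksum field must hold the
 * pseudo-header checksum before tcp_gro_complete() is called; the helpers
 * below compute it for IPv4 and IPv6 respectively.
 */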
#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If we there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashed.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses a none-atomic bit operations because is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * None atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

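/* Walk all CoS TX rings of a fastpath queue, free every pending packet and
 * reset the corresponding BQL queue state.
 */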
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

0e8d2ec5 1650int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1651{
1ab4434c 1652 int msix_vec = 0, i, rc;
9f6c9258 1653
1ab4434c
AE
1654 /* VFs don't have a default status block */
1655 if (IS_PF(bp)) {
1656 bp->msix_table[msix_vec].entry = msix_vec;
1657 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1658 bp->msix_table[0].entry);
1659 msix_vec++;
1660 }
9f6c9258 1661
55c11941
MS
1662 /* Cnic requires an msix vector for itself */
1663 if (CNIC_SUPPORT(bp)) {
1664 bp->msix_table[msix_vec].entry = msix_vec;
1665 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1666 msix_vec, bp->msix_table[msix_vec].entry);
1667 msix_vec++;
1668 }
1669
6383c0b3 1670 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1671 for_each_eth_queue(bp, i) {
d6214d7a 1672 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1673 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1674 msix_vec, msix_vec, i);
d6214d7a 1675 msix_vec++;
9f6c9258
DK
1676 }
1677
1ab4434c
AE
1678 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1679 msix_vec);
d6214d7a 1680
a5444b17
AG
1681 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1682 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
9f6c9258
DK
1683 /*
1684 * reconfigure number of tx/rx queues according to available
1685 * MSI-X vectors
1686 */
a5444b17 1687 if (rc == -ENOSPC) {
30a5de77 1688 /* Get by with single vector */
a5444b17
AG
1689 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1690 if (rc < 0) {
30a5de77
DK
1691 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1692 rc);
1693 goto no_msix;
1694 }
1695
1696 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1697 bp->flags |= USING_SINGLE_MSIX_FLAG;
1698
55c11941
MS
1699 BNX2X_DEV_INFO("set number of queues to 1\n");
1700 bp->num_ethernet_queues = 1;
1701 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1702 } else if (rc < 0) {
a5444b17 1703 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1704 goto no_msix;
a5444b17
AG
1705 } else if (rc < msix_vec) {
1706 /* how less vectors we will have? */
1707 int diff = msix_vec - rc;
1708
1709 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1710
1711 /*
1712 * decrease number of queues by number of unallocated entries
1713 */
1714 bp->num_ethernet_queues -= diff;
1715 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1716
1717 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1718 bp->num_queues);
9f6c9258
DK
1719 }
1720
1721 bp->flags |= USING_MSIX_FLAG;
1722
1723 return 0;
30a5de77
DK
1724
1725no_msix:
 1726 /* fall back to INTx if there is not enough memory */
1727 if (rc == -ENOMEM)
1728 bp->flags |= DISABLE_MSI_FLAG;
1729
1730 return rc;
9f6c9258
DK
1731}
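/* Illustrative note, not part of the original driver: the allocation above
 * degrades in three steps. Assume, hypothetically, a PF with 8 ETH queues
 * and CNIC support, i.e. msix_vec = 1 (slowpath) + 1 (CNIC) + 8 (ETH) = 10:
 *
 *  - pci_enable_msix_range() may grant fewer than 10 vectors, e.g. rc = 7,
 *    in which case diff = 3 and bp->num_ethernet_queues drops from 8 to 5;
 *  - if even the minimum cannot be met (-ENOSPC), a single MSI-X vector is
 *    requested and the driver runs with one ethernet queue;
 *  - if that fails as well, the no_msix path lets the caller fall back to
 *    MSI or legacy INTx.
 */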
1732
1733static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1734{
ca92429f 1735 int i, rc, offset = 0;
9f6c9258 1736
ad5afc89
AE
1737 /* no default status block for vf */
1738 if (IS_PF(bp)) {
1739 rc = request_irq(bp->msix_table[offset++].vector,
1740 bnx2x_msix_sp_int, 0,
1741 bp->dev->name, bp->dev);
1742 if (rc) {
1743 BNX2X_ERR("request sp irq failed\n");
1744 return -EBUSY;
1745 }
9f6c9258
DK
1746 }
1747
55c11941
MS
1748 if (CNIC_SUPPORT(bp))
1749 offset++;
1750
ec6ba945 1751 for_each_eth_queue(bp, i) {
9f6c9258
DK
1752 struct bnx2x_fastpath *fp = &bp->fp[i];
1753 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1754 bp->dev->name, i);
1755
d6214d7a 1756 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1757 bnx2x_msix_fp_int, 0, fp->name, fp);
1758 if (rc) {
ca92429f
DK
1759 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1760 bp->msix_table[offset].vector, rc);
1761 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1762 return -EBUSY;
1763 }
1764
d6214d7a 1765 offset++;
9f6c9258
DK
1766 }
1767
ec6ba945 1768 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1769 if (IS_PF(bp)) {
1770 offset = 1 + CNIC_SUPPORT(bp);
1771 netdev_info(bp->dev,
1772 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1773 bp->msix_table[0].vector,
1774 0, bp->msix_table[offset].vector,
1775 i - 1, bp->msix_table[offset + i - 1].vector);
1776 } else {
1777 offset = CNIC_SUPPORT(bp);
1778 netdev_info(bp->dev,
1779 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1780 0, bp->msix_table[offset].vector,
1781 i - 1, bp->msix_table[offset + i - 1].vector);
1782 }
9f6c9258
DK
1783 return 0;
1784}
1785
d6214d7a 1786int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1787{
1788 int rc;
1789
1790 rc = pci_enable_msi(bp->pdev);
1791 if (rc) {
51c1a580 1792 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1793 return -1;
1794 }
1795 bp->flags |= USING_MSI_FLAG;
1796
1797 return 0;
1798}
1799
1800static int bnx2x_req_irq(struct bnx2x *bp)
1801{
1802 unsigned long flags;
30a5de77 1803 unsigned int irq;
9f6c9258 1804
30a5de77 1805 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1806 flags = 0;
1807 else
1808 flags = IRQF_SHARED;
1809
30a5de77
DK
1810 if (bp->flags & USING_MSIX_FLAG)
1811 irq = bp->msix_table[0].vector;
1812 else
1813 irq = bp->pdev->irq;
1814
1815 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1816}
1817
c957d09f 1818static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1819{
1820 int rc = 0;
30a5de77
DK
1821 if (bp->flags & USING_MSIX_FLAG &&
1822 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1823 rc = bnx2x_req_msix_irqs(bp);
1824 if (rc)
1825 return rc;
1826 } else {
619c5cb6
VZ
1827 rc = bnx2x_req_irq(bp);
1828 if (rc) {
1829 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1830 return rc;
1831 }
1832 if (bp->flags & USING_MSI_FLAG) {
1833 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1834 netdev_info(bp->dev, "using MSI IRQ %d\n",
1835 bp->dev->irq);
1836 }
1837 if (bp->flags & USING_MSIX_FLAG) {
1838 bp->dev->irq = bp->msix_table[0].vector;
1839 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1840 bp->dev->irq);
619c5cb6
VZ
1841 }
1842 }
1843
1844 return 0;
1845}
1846
55c11941
MS
1847static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1848{
1849 int i;
1850
8f20aa57 1851 for_each_rx_queue_cnic(bp, i) {
074975d0 1852 bnx2x_fp_busy_poll_init(&bp->fp[i]);
55c11941 1853 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1854 }
55c11941
MS
1855}
1856
1191cb83 1857static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1858{
1859 int i;
1860
8f20aa57 1861 for_each_eth_queue(bp, i) {
074975d0 1862 bnx2x_fp_busy_poll_init(&bp->fp[i]);
9f6c9258 1863 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1864 }
9f6c9258
DK
1865}
1866
55c11941
MS
1867static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1868{
1869 int i;
1870
8f20aa57 1871 for_each_rx_queue_cnic(bp, i) {
55c11941 1872 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1873 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1874 usleep_range(1000, 2000);
8f20aa57 1875 }
55c11941
MS
1876}
1877
1191cb83 1878static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1879{
1880 int i;
1881
8f20aa57 1882 for_each_eth_queue(bp, i) {
9f6c9258 1883 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1884 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1885 usleep_range(1000, 2000);
8f20aa57 1886 }
9f6c9258
DK
1887}
1888
1889void bnx2x_netif_start(struct bnx2x *bp)
1890{
4b7ed897
DK
1891 if (netif_running(bp->dev)) {
1892 bnx2x_napi_enable(bp);
55c11941
MS
1893 if (CNIC_LOADED(bp))
1894 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1895 bnx2x_int_enable(bp);
1896 if (bp->state == BNX2X_STATE_OPEN)
1897 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1898 }
1899}
1900
1901void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1902{
1903 bnx2x_int_disable_sync(bp, disable_hw);
1904 bnx2x_napi_disable(bp);
55c11941
MS
1905 if (CNIC_LOADED(bp))
1906 bnx2x_napi_disable_cnic(bp);
9f6c9258 1907}
9f6c9258 1908
f663dd9a 1909u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1910 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1911{
8307fa3e 1912 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1913
55c11941 1914 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1915 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1916 u16 ether_type = ntohs(hdr->h_proto);
1917
1918 /* Skip VLAN tag if present */
1919 if (ether_type == ETH_P_8021Q) {
1920 struct vlan_ethhdr *vhdr =
1921 (struct vlan_ethhdr *)skb->data;
1922
1923 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1924 }
1925
1926 /* If ethertype is FCoE or FIP - use FCoE ring */
1927 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1928 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1929 }
55c11941 1930
cdb9d6ae 1931 /* select a non-FCoE queue */
99932d4f 1932 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1933}
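/* Illustrative note, not part of the original driver: with CNIC loaded and
 * FCoE enabled, an FCoE or FIP frame - even one carried inside an 802.1Q
 * tag - is steered to the dedicated FCoE Tx ring, while all other traffic
 * is spread by the stack's fallback hash across the ETH queues only.
 */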
1934
d6214d7a
DK
1935void bnx2x_set_num_queues(struct bnx2x *bp)
1936{
96305234 1937 /* RSS queues */
55c11941 1938 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1939
a3348722 1940 /* override in STORAGE SD modes */
2e98ffc2 1941 if (IS_MF_STORAGE_ONLY(bp))
55c11941
MS
1942 bp->num_ethernet_queues = 1;
1943
ec6ba945 1944 /* Add special queues */
55c11941
MS
1945 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1946 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1947
1948 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1949}
1950
cdb9d6ae
VZ
1951/**
1952 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1953 *
1954 * @bp: Driver handle
1955 *
 1956 * We currently support at most 16 Tx queues for each CoS, thus we will
1957 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1958 * bp->max_cos.
1959 *
1960 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1961 * index after all ETH L2 indices.
1962 *
1963 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1964 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1965 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1966 *
1967 * The proper configuration of skb->queue_mapping is handled by
1968 * bnx2x_select_queue() and __skb_tx_hash().
1969 *
1970 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1971 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1972 */
55c11941 1973static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1974{
6383c0b3 1975 int rc, tx, rx;
ec6ba945 1976
65565884 1977 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1978 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1979
6383c0b3 1980/* account for fcoe queue */
55c11941
MS
1981 if (include_cnic && !NO_FCOE(bp)) {
1982 rx++;
1983 tx++;
6383c0b3 1984 }
6383c0b3
AE
1985
1986 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1987 if (rc) {
1988 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1989 return rc;
1990 }
1991 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1992 if (rc) {
1993 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1994 return rc;
1995 }
1996
51c1a580 1997 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1998 tx, rx);
1999
ec6ba945
VZ
2000 return rc;
2001}
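/* Illustrative example, not part of the original driver: with 8 ETH queues,
 * bp->max_cos = 3 and an FCoE L2 queue included, the function above
 * registers tx = 8 * 3 + 1 = 25 real Tx queues and rx = 8 + 1 = 9 real Rx
 * queues with the stack.
 */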
2002
1191cb83 2003static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
2004{
2005 int i;
2006
2007 for_each_queue(bp, i) {
2008 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 2009 u32 mtu;
a8c94b91
VZ
2010
2011 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2012 if (IS_FCOE_IDX(i))
2013 /*
2014 * Although there are no IP frames expected to arrive to
2015 * this ring we still want to add an
2016 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2017 * overrun attack.
2018 */
e52fcb24 2019 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 2020 else
e52fcb24
ED
2021 mtu = bp->dev->mtu;
2022 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2023 IP_HEADER_ALIGNMENT_PADDING +
2024 ETH_OVREHEAD +
2025 mtu +
2026 BNX2X_FW_RX_ALIGN_END;
16a5fd92 2027 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2028 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2029 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2030 else
2031 fp->rx_frag_size = 0;
a8c94b91
VZ
2032 }
2033}
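/* Illustrative note, not part of the original driver: on a 4K-page system a
 * standard 1500-byte MTU keeps rx_buf_size + NET_SKB_PAD well within one
 * page, so rx_frag_size is set and the page-frag allocator is used; a
 * 9000-byte jumbo MTU does not fit, rx_frag_size stays 0 and regular buffer
 * allocation is used instead.
 */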
2034
60cad4e6 2035static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2036{
2037 int i;
619c5cb6
VZ
2038 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2039
16a5fd92 2040 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2041 * enabled
2042 */
5d317c6a
MS
2043 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2044 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2045 bp->fp->cl_id +
2046 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2047
2048 /*
2049 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2050 * per-port, so if explicit configuration is needed, do it only
2051 * for a PMF.
2052 *
2053 * For 57712 and newer on the other hand it's a per-function
2054 * configuration.
2055 */
5d317c6a 2056 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2057}
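/* Illustrative example, not part of the original driver:
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so with 4 ETH queues the
 * initial indirection table cycles cl_id+0, cl_id+1, cl_id+2, cl_id+3,
 * cl_id+0, ... until the table is filled.
 */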
2058
60cad4e6
AE
2059int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2060 bool config_hash, bool enable)
619c5cb6 2061{
3b603066 2062 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2063
2064 /* Although RSS is meaningless when there is a single HW queue we
2065 * still need it enabled in order to have HW Rx hash generated.
2066 *
2067 * if (!is_eth_multi(bp))
2068 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2069 */
2070
96305234 2071 params.rss_obj = rss_obj;
619c5cb6
VZ
2072
2073 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2074
60cad4e6
AE
2075 if (enable) {
2076 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2077
2078 /* RSS configuration */
2079 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2080 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2081 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2082 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2083 if (rss_obj->udp_rss_v4)
2084 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2085 if (rss_obj->udp_rss_v6)
2086 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
e42780b6
DK
2087
2088 if (!CHIP_IS_E1x(bp))
2089 /* valid only for TUNN_MODE_GRE tunnel mode */
2090 __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
60cad4e6
AE
2091 } else {
2092 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2093 }
619c5cb6 2094
96305234
DK
2095 /* Hash bits */
2096 params.rss_result_mask = MULTI_MASK;
619c5cb6 2097
5d317c6a 2098 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2099
96305234
DK
2100 if (config_hash) {
2101 /* RSS keys */
e3ec69ca 2102 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2103 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2104 }
2105
60cad4e6
AE
2106 if (IS_PF(bp))
2107 return bnx2x_config_rss(bp, &params);
2108 else
2109 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2110}
2111
1191cb83 2112static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2113{
3b603066 2114 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2115
2116 /* Prepare parameters for function state transitions */
2117 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2118
2119 func_params.f_obj = &bp->func_obj;
2120 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2121
2122 func_params.params.hw_init.load_phase = load_code;
2123
2124 return bnx2x_func_state_change(bp, &func_params);
2125}
2126
2127/*
 2128 * Cleans the objects that have internal lists without sending
16a5fd92 2129 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2130 */
7fa6f340 2131void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2132{
2133 int rc;
2134 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2135 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2136 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2137
2138 /***************** Cleanup MACs' object first *************************/
2139
2140 /* Wait for completion of requested */
2141 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2142 /* Perform a dry cleanup */
2143 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2144
2145 /* Clean ETH primary MAC */
2146 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2147 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2148 &ramrod_flags);
2149 if (rc != 0)
2150 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2151
2152 /* Cleanup UC list */
2153 vlan_mac_flags = 0;
2154 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2155 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2156 &ramrod_flags);
2157 if (rc != 0)
2158 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2159
2160 /***************** Now clean mcast object *****************************/
2161 rparam.mcast_obj = &bp->mcast_obj;
2162 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2163
8b09be5f
YM
2164 /* Add a DEL command... - Since we're doing a driver cleanup only,
2165 * we take a lock surrounding both the initial send and the CONTs,
2166 * as we don't want a true completion to disrupt us in the middle.
2167 */
2168 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2169 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2170 if (rc < 0)
51c1a580
MS
2171 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2172 rc);
619c5cb6
VZ
2173
2174 /* ...and wait until all pending commands are cleared */
2175 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2176 while (rc != 0) {
2177 if (rc < 0) {
2178 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2179 rc);
8b09be5f 2180 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2181 return;
2182 }
2183
2184 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2185 }
8b09be5f 2186 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2187}
2188
2189#ifndef BNX2X_STOP_ON_ERROR
2190#define LOAD_ERROR_EXIT(bp, label) \
2191 do { \
2192 (bp)->state = BNX2X_STATE_ERROR; \
2193 goto label; \
2194 } while (0)
55c11941
MS
2195
2196#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2197 do { \
2198 bp->cnic_loaded = false; \
2199 goto label; \
2200 } while (0)
2201#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2202#define LOAD_ERROR_EXIT(bp, label) \
2203 do { \
2204 (bp)->state = BNX2X_STATE_ERROR; \
2205 (bp)->panic = 1; \
2206 return -EBUSY; \
2207 } while (0)
55c11941
MS
2208#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2209 do { \
2210 bp->cnic_loaded = false; \
2211 (bp)->panic = 1; \
2212 return -EBUSY; \
2213 } while (0)
2214#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2215
ad5afc89
AE
2216static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2217{
2218 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2219 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2220 return;
2221}
2222
2223static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2224{
8db573ba 2225 int num_groups, vf_headroom = 0;
ad5afc89 2226 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2227
ad5afc89
AE
2228 /* number of queues for statistics is number of eth queues + FCoE */
2229 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2230
ad5afc89
AE
2231 /* Total number of FW statistics requests =
2232 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2233 * and fcoe l2 queue) stats + num of queues (which includes another 1
2234 * for fcoe l2 queue if applicable)
2235 */
2236 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2237
8db573ba
AE
2238 /* vf stats appear in the request list, but their data is allocated by
2239 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2240 * it is used to determine where to place the vf stats queries in the
2241 * request struct
2242 */
2243 if (IS_SRIOV(bp))
6411280a 2244 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2245
ad5afc89
AE
2246 /* Request is built from stats_query_header and an array of
2247 * stats_query_cmd_group each of which contains
 2248 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2249 * configured in the stats_query_header.
2250 */
2251 num_groups =
8db573ba
AE
2252 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2253 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2254 1 : 0));
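/* Illustrative sketch, not part of the original driver: the expression above
 * is an open-coded round-up division and could equally be written as
 *
 *	num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
 *				  STATS_QUERY_CMD_COUNT);
 *
 * e.g. 20 requests with a hypothetical group size of 16 need 2 groups.
 */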
2255
8db573ba
AE
2256 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2257 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2258 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2259 num_groups * sizeof(struct stats_query_cmd_group);
2260
2261 /* Data for statistics requests + stats_counter
2262 * stats_counter holds per-STORM counters that are incremented
2263 * when STORM has finished with the current request.
 2264 * memory for FCoE offloaded statistics is counted anyway,
 2265 * even if it will not be sent.
2266 * VF stats are not accounted for here as the data of VF stats is stored
2267 * in memory allocated by the VF, not here.
2268 */
2269 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2270 sizeof(struct per_pf_stats) +
2271 sizeof(struct fcoe_statistics_params) +
2272 sizeof(struct per_queue_stats) * num_queue_stats +
2273 sizeof(struct stats_counter);
2274
cd2b0389
JP
2275 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2276 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2277 if (!bp->fw_stats)
2278 goto alloc_mem_err;
ad5afc89
AE
2279
2280 /* Set shortcuts */
2281 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2282 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2283 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2284 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2285 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2286 bp->fw_stats_req_sz;
2287
6bf07b8e 2288 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2289 U64_HI(bp->fw_stats_req_mapping),
2290 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2291 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2292 U64_HI(bp->fw_stats_data_mapping),
2293 U64_LO(bp->fw_stats_data_mapping));
2294 return 0;
2295
2296alloc_mem_err:
2297 bnx2x_free_fw_stats_mem(bp);
2298 BNX2X_ERR("Can't allocate FW stats memory\n");
2299 return -ENOMEM;
2300}
2301
2302/* send load request to mcp and analyze response */
2303static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2304{
178135c1
DK
2305 u32 param;
2306
ad5afc89
AE
2307 /* init fw_seq */
2308 bp->fw_seq =
2309 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2310 DRV_MSG_SEQ_NUMBER_MASK);
2311 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2312
2313 /* Get current FW pulse sequence */
2314 bp->fw_drv_pulse_wr_seq =
2315 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2316 DRV_PULSE_SEQ_MASK);
2317 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2318
178135c1
DK
2319 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2320
2321 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2322 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2323
ad5afc89 2324 /* load request */
178135c1 2325 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2326
2327 /* if mcp fails to respond we must abort */
2328 if (!(*load_code)) {
2329 BNX2X_ERR("MCP response failure, aborting\n");
2330 return -EBUSY;
2331 }
2332
2333 /* If mcp refused (e.g. other port is in diagnostic mode) we
2334 * must abort
2335 */
2336 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2337 BNX2X_ERR("MCP refused load request, aborting\n");
2338 return -EBUSY;
2339 }
2340 return 0;
2341}
2342
2343/* check whether another PF has already loaded FW to chip. In
2344 * virtualized environments a pf from another VM may have already
2345 * initialized the device including loading FW
2346 */
91ebb929 2347int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2348{
2349 /* is another pf loaded on this engine? */
2350 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2351 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2352 /* build my FW version dword */
2353 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2354 (BCM_5710_FW_MINOR_VERSION << 8) +
2355 (BCM_5710_FW_REVISION_VERSION << 16) +
2356 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2357
2358 /* read loaded FW from chip */
2359 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2360
2361 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2362 loaded_fw, my_fw);
2363
2364 /* abort nic load if version mismatch */
2365 if (my_fw != loaded_fw) {
91ebb929
YM
2366 if (print_err)
2367 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2368 loaded_fw, my_fw);
2369 else
2370 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2371 loaded_fw, my_fw);
ad5afc89
AE
2372 return -EBUSY;
2373 }
2374 }
2375 return 0;
2376}
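/* Illustrative example, not part of the original driver: a hypothetical
 * firmware version 7.8.19.0 packs into the dword as
 * 7 + (8 << 8) + (19 << 16) + (0 << 24) = 0x00130807, and the load is
 * aborted whenever this value differs from what XSEM_REG_PRAM reports.
 */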
2377
2378/* returns the "mcp load_code" according to global load_count array */
2379static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2380{
2381 int path = BP_PATH(bp);
2382
2383 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2384 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2385 bnx2x_load_count[path][2]);
2386 bnx2x_load_count[path][0]++;
2387 bnx2x_load_count[path][1 + port]++;
ad5afc89 2388 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2389 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2390 bnx2x_load_count[path][2]);
2391 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2392 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2393 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2394 return FW_MSG_CODE_DRV_LOAD_PORT;
2395 else
2396 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2397}
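/* Illustrative note, not part of the original driver: without an MCP the
 * per-path counters decide the role - the first function loaded on a path
 * gets LOAD_COMMON, the first one on a port gets LOAD_PORT, and every later
 * function on an already initialized port gets LOAD_FUNCTION.
 */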
2398
2399/* mark PMF if applicable */
2400static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2401{
2402 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2403 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2404 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2405 bp->port.pmf = 1;
2406 /* We need the barrier to ensure the ordering between the
2407 * writing to bp->port.pmf here and reading it from the
2408 * bnx2x_periodic_task().
2409 */
2410 smp_mb();
2411 } else {
2412 bp->port.pmf = 0;
452427b0
YM
2413 }
2414
ad5afc89
AE
2415 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2416}
2417
2418static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2419{
2420 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2421 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2422 (bp->common.shmem2_base)) {
2423 if (SHMEM2_HAS(bp, dcc_support))
2424 SHMEM2_WR(bp, dcc_support,
2425 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2426 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2427 if (SHMEM2_HAS(bp, afex_driver_support))
2428 SHMEM2_WR(bp, afex_driver_support,
2429 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2430 }
2431
2432 /* Set AFEX default VLAN tag to an invalid value */
2433 bp->afex_def_vlan_tag = -1;
452427b0
YM
2434}
2435
1191cb83
ED
2436/**
2437 * bnx2x_bz_fp - zero content of the fastpath structure.
2438 *
2439 * @bp: driver handle
2440 * @index: fastpath index to be zeroed
2441 *
 2442 * Makes sure the contents of the bp->fp[index].napi are kept
2443 * intact.
2444 */
2445static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2446{
2447 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2448 int cos;
1191cb83 2449 struct napi_struct orig_napi = fp->napi;
15192a8c 2450 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2451
1191cb83 2452 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2453 if (fp->tpa_info)
2454 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2455 sizeof(struct bnx2x_agg_info));
2456 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2457
2458 /* Restore the NAPI object as it has been already initialized */
2459 fp->napi = orig_napi;
15192a8c 2460 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2461 fp->bp = bp;
2462 fp->index = index;
2463 if (IS_ETH_FP(fp))
2464 fp->max_cos = bp->max_cos;
2465 else
2466 /* Special queues support only one CoS */
2467 fp->max_cos = 1;
2468
65565884 2469 /* Init txdata pointers */
65565884
MS
2470 if (IS_FCOE_FP(fp))
2471 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2472 if (IS_ETH_FP(fp))
2473 for_each_cos_in_tx_queue(fp, cos)
2474 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2475 BNX2X_NUM_ETH_QUEUES(bp) + index];
2476
16a5fd92 2477 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2478 * minimal size so it must be set prior to queue memory allocation
2479 */
f8dcb5e3 2480 if (bp->dev->features & NETIF_F_LRO)
1191cb83 2481 fp->mode = TPA_MODE_LRO;
f8dcb5e3 2482 else if (bp->dev->features & NETIF_F_GRO &&
7e6b4d44 2483 bnx2x_mtu_allows_gro(bp->dev->mtu))
1191cb83 2484 fp->mode = TPA_MODE_GRO;
7e6b4d44
MS
2485 else
2486 fp->mode = TPA_MODE_DISABLED;
1191cb83 2487
22a8f237
MS
2488 /* We don't want TPA if it's disabled in bp
2489 * or if this is an FCoE L2 ring.
2490 */
2491 if (bp->disable_tpa || IS_FCOE_FP(fp))
7e6b4d44 2492 fp->mode = TPA_MODE_DISABLED;
55c11941
MS
2493}
2494
2495int bnx2x_load_cnic(struct bnx2x *bp)
2496{
2497 int i, rc, port = BP_PORT(bp);
2498
2499 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2500
2501 mutex_init(&bp->cnic_mutex);
2502
ad5afc89
AE
2503 if (IS_PF(bp)) {
2504 rc = bnx2x_alloc_mem_cnic(bp);
2505 if (rc) {
2506 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2507 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2508 }
55c11941
MS
2509 }
2510
2511 rc = bnx2x_alloc_fp_mem_cnic(bp);
2512 if (rc) {
2513 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2514 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2515 }
2516
2517 /* Update the number of queues with the cnic queues */
2518 rc = bnx2x_set_real_num_queues(bp, 1);
2519 if (rc) {
2520 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2521 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2522 }
2523
2524 /* Add all CNIC NAPI objects */
2525 bnx2x_add_all_napi_cnic(bp);
2526 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2527 bnx2x_napi_enable_cnic(bp);
2528
2529 rc = bnx2x_init_hw_func_cnic(bp);
2530 if (rc)
2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2532
2533 bnx2x_nic_init_cnic(bp);
2534
ad5afc89
AE
2535 if (IS_PF(bp)) {
2536 /* Enable Timer scan */
2537 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2538
2539 /* setup cnic queues */
2540 for_each_cnic_queue(bp, i) {
2541 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2542 if (rc) {
2543 BNX2X_ERR("Queue setup failed\n");
2544 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2545 }
55c11941
MS
2546 }
2547 }
2548
2549 /* Initialize Rx filter. */
8b09be5f 2550 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2551
2552 /* re-read iscsi info */
2553 bnx2x_get_iscsi_info(bp);
2554 bnx2x_setup_cnic_irq_info(bp);
2555 bnx2x_setup_cnic_info(bp);
2556 bp->cnic_loaded = true;
2557 if (bp->state == BNX2X_STATE_OPEN)
2558 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2559
55c11941
MS
2560 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2561
2562 return 0;
2563
2564#ifndef BNX2X_STOP_ON_ERROR
2565load_error_cnic2:
2566 /* Disable Timer scan */
2567 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2568
2569load_error_cnic1:
2570 bnx2x_napi_disable_cnic(bp);
2571 /* Update the number of queues without the cnic queues */
d9d81862 2572 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2573 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2574load_error_cnic0:
2575 BNX2X_ERR("CNIC-related load failed\n");
2576 bnx2x_free_fp_mem_cnic(bp);
2577 bnx2x_free_mem_cnic(bp);
2578 return rc;
2579#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2580}
2581
9f6c9258
DK
2582/* must be called with rtnl_lock */
2583int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2584{
619c5cb6 2585 int port = BP_PORT(bp);
ad5afc89 2586 int i, rc = 0, load_code = 0;
9f6c9258 2587
55c11941
MS
2588 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2589 DP(NETIF_MSG_IFUP,
2590 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2591
9f6c9258 2592#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2593 if (unlikely(bp->panic)) {
2594 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2595 return -EPERM;
51c1a580 2596 }
9f6c9258
DK
2597#endif
2598
2599 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2600
16a5fd92 2601 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2602 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2603 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2604 &bp->last_reported_link.link_report_flags);
2ae17f66 2605
ad5afc89
AE
2606 if (IS_PF(bp))
2607 /* must be called before memory allocation and HW init */
2608 bnx2x_ilt_set_info(bp);
523224a3 2609
6383c0b3
AE
2610 /*
2611 * Zero fastpath structures preserving invariants like napi, which are
2612 * allocated only once, fp index, max_cos, bp pointer.
7e6b4d44 2613 * Also set fp->mode and txdata_ptr.
b3b83c3f 2614 */
51c1a580 2615 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2616 for_each_queue(bp, i)
2617 bnx2x_bz_fp(bp, i);
55c11941
MS
2618 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2619 bp->num_cnic_queues) *
2620 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2621
55c11941 2622 bp->fcoe_init = false;
6383c0b3 2623
a8c94b91
VZ
2624 /* Set the receive queues buffer size */
2625 bnx2x_set_rx_buf_size(bp);
2626
ad5afc89
AE
2627 if (IS_PF(bp)) {
2628 rc = bnx2x_alloc_mem(bp);
2629 if (rc) {
2630 BNX2X_ERR("Unable to allocate bp memory\n");
2631 return rc;
2632 }
2633 }
2634
ad5afc89
AE
 2635 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2636 * of memory available for RSS queues
2637 */
2638 rc = bnx2x_alloc_fp_mem(bp);
2639 if (rc) {
2640 BNX2X_ERR("Unable to allocate memory for fps\n");
2641 LOAD_ERROR_EXIT(bp, load_error0);
2642 }
d6214d7a 2643
e3ed4eae
DK
 2644 /* Allocate memory for FW statistics */
2645 if (bnx2x_alloc_fw_stats_mem(bp))
2646 LOAD_ERROR_EXIT(bp, load_error0);
2647
8d9ac297
AE
2648 /* request pf to initialize status blocks */
2649 if (IS_VF(bp)) {
2650 rc = bnx2x_vfpf_init(bp);
2651 if (rc)
2652 LOAD_ERROR_EXIT(bp, load_error0);
2653 }
2654
b3b83c3f
DK
2655 /* As long as bnx2x_alloc_mem() may possibly update
2656 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2657 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2658 */
55c11941 2659 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2660 if (rc) {
ec6ba945 2661 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2662 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2663 }
2664
6383c0b3 2665 /* configure multi cos mappings in kernel.
16a5fd92
YM
2666 * this configuration may be overridden by a multi class queue
2667 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2668 */
2669 bnx2x_setup_tc(bp->dev, bp->max_cos);
2670
26614ba5
MS
2671 /* Add all NAPI objects */
2672 bnx2x_add_all_napi(bp);
55c11941 2673 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2674 bnx2x_napi_enable(bp);
2675
ad5afc89
AE
2676 if (IS_PF(bp)) {
2677 /* set pf load just before approaching the MCP */
2678 bnx2x_set_pf_load(bp);
2679
2680 /* if mcp exists send load request and analyze response */
2681 if (!BP_NOMCP(bp)) {
2682 /* attempt to load pf */
2683 rc = bnx2x_nic_load_request(bp, &load_code);
2684 if (rc)
2685 LOAD_ERROR_EXIT(bp, load_error1);
2686
2687 /* what did mcp say? */
91ebb929 2688 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2689 if (rc) {
2690 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2691 LOAD_ERROR_EXIT(bp, load_error2);
2692 }
ad5afc89
AE
2693 } else {
2694 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2695 }
9f6c9258 2696
ad5afc89
AE
2697 /* mark pmf if applicable */
2698 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2699
ad5afc89
AE
2700 /* Init Function state controlling object */
2701 bnx2x__init_func_obj(bp);
6383c0b3 2702
ad5afc89
AE
2703 /* Initialize HW */
2704 rc = bnx2x_init_hw(bp, load_code);
2705 if (rc) {
2706 BNX2X_ERR("HW init failed, aborting\n");
2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2708 LOAD_ERROR_EXIT(bp, load_error2);
2709 }
9f6c9258
DK
2710 }
2711
ecf01c22
YM
2712 bnx2x_pre_irq_nic_init(bp);
2713
d6214d7a
DK
2714 /* Connect to IRQs */
2715 rc = bnx2x_setup_irqs(bp);
523224a3 2716 if (rc) {
ad5afc89
AE
2717 BNX2X_ERR("setup irqs failed\n");
2718 if (IS_PF(bp))
2719 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2720 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2721 }
2722
619c5cb6 2723 /* Init per-function objects */
ad5afc89 2724 if (IS_PF(bp)) {
ecf01c22
YM
2725 /* Setup NIC internals and enable interrupts */
2726 bnx2x_post_irq_nic_init(bp, load_code);
2727
ad5afc89 2728 bnx2x_init_bp_objs(bp);
b56e9670 2729 bnx2x_iov_nic_init(bp);
a3348722 2730
ad5afc89
AE
2731 /* Set AFEX default VLAN tag to an invalid value */
2732 bp->afex_def_vlan_tag = -1;
2733 bnx2x_nic_load_afex_dcc(bp, load_code);
2734 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2735 rc = bnx2x_func_start(bp);
2736 if (rc) {
2737 BNX2X_ERR("Function start failed!\n");
2738 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2739
619c5cb6 2740 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2741 }
9f6c9258 2742
ad5afc89
AE
2743 /* Send LOAD_DONE command to MCP */
2744 if (!BP_NOMCP(bp)) {
2745 load_code = bnx2x_fw_command(bp,
2746 DRV_MSG_CODE_LOAD_DONE, 0);
2747 if (!load_code) {
2748 BNX2X_ERR("MCP response failure, aborting\n");
2749 rc = -EBUSY;
2750 LOAD_ERROR_EXIT(bp, load_error3);
2751 }
2752 }
9f6c9258 2753
0c14e5ce
AE
2754 /* initialize FW coalescing state machines in RAM */
2755 bnx2x_update_coalesce(bp);
60cad4e6 2756 }
0c14e5ce 2757
60cad4e6
AE
2758 /* setup the leading queue */
2759 rc = bnx2x_setup_leading(bp);
2760 if (rc) {
2761 BNX2X_ERR("Setup leading failed!\n");
2762 LOAD_ERROR_EXIT(bp, load_error3);
2763 }
ad5afc89 2764
60cad4e6
AE
2765 /* set up the rest of the queues */
2766 for_each_nondefault_eth_queue(bp, i) {
2767 if (IS_PF(bp))
2768 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2769 else /* VF */
2770 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2771 if (rc) {
60cad4e6 2772 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2773 LOAD_ERROR_EXIT(bp, load_error3);
2774 }
60cad4e6 2775 }
8d9ac297 2776
60cad4e6
AE
2777 /* setup rss */
2778 rc = bnx2x_init_rss(bp);
2779 if (rc) {
2780 BNX2X_ERR("PF RSS init failed\n");
2781 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2782 }
619c5cb6 2783
523224a3
DK
2784 /* Now when Clients are configured we are ready to work */
2785 bp->state = BNX2X_STATE_OPEN;
2786
619c5cb6 2787 /* Configure a ucast MAC */
ad5afc89
AE
2788 if (IS_PF(bp))
2789 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2790 else /* vf */
f8f4f61a
DK
2791 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2792 true);
51c1a580
MS
2793 if (rc) {
2794 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2795 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2796 }
6e30dd4e 2797
ad5afc89 2798 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2799 bnx2x_update_max_mf_config(bp, bp->pending_max);
2800 bp->pending_max = 0;
2801 }
2802
ad5afc89
AE
2803 if (bp->port.pmf) {
2804 rc = bnx2x_initial_phy_init(bp, load_mode);
2805 if (rc)
2806 LOAD_ERROR_EXIT(bp, load_error3);
2807 }
c63da990 2808 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2809
619c5cb6
VZ
2810 /* Start fast path */
2811
2812 /* Initialize Rx filter. */
8b09be5f 2813 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2814
eeed018c
MK
2815 if (bp->flags & PTP_SUPPORTED) {
2816 bnx2x_init_ptp(bp);
2817 bnx2x_configure_ptp_filters(bp);
2818 }
2819 /* Start Tx */
9f6c9258
DK
2820 switch (load_mode) {
2821 case LOAD_NORMAL:
16a5fd92 2822 /* Tx queue should be only re-enabled */
523224a3 2823 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2824 break;
2825
2826 case LOAD_OPEN:
2827 netif_tx_start_all_queues(bp->dev);
4e857c58 2828 smp_mb__after_atomic();
9f6c9258
DK
2829 break;
2830
2831 case LOAD_DIAG:
8970b2e4 2832 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2833 bp->state = BNX2X_STATE_DIAG;
2834 break;
2835
2836 default:
2837 break;
2838 }
2839
00253a8c 2840 if (bp->port.pmf)
4c704899 2841 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2842 else
9f6c9258
DK
2843 bnx2x__link_status_update(bp);
2844
2845 /* start the timer */
2846 mod_timer(&bp->timer, jiffies + bp->current_interval);
2847
55c11941
MS
2848 if (CNIC_ENABLED(bp))
2849 bnx2x_load_cnic(bp);
9f6c9258 2850
42f8277f
YM
2851 if (IS_PF(bp))
2852 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2853
ad5afc89
AE
2854 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2855 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2856 u32 val;
2857 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2858 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2859 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2860 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2861 }
2862
619c5cb6 2863 /* Wait for all pending SP commands to complete */
ad5afc89 2864 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2865 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2866 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2867 return -EBUSY;
2868 }
6891dd25 2869
9876879f
BW
2870 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2871 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2872 bnx2x_dcbx_init(bp, false);
2873
55c11941
MS
2874 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2875
9f6c9258
DK
2876 return 0;
2877
619c5cb6 2878#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2879load_error3:
ad5afc89
AE
2880 if (IS_PF(bp)) {
2881 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2882
ad5afc89
AE
2883 /* Clean queueable objects */
2884 bnx2x_squeeze_objects(bp);
2885 }
619c5cb6 2886
9f6c9258
DK
2887 /* Free SKBs, SGEs, TPA pool and driver internals */
2888 bnx2x_free_skbs(bp);
ec6ba945 2889 for_each_rx_queue(bp, i)
9f6c9258 2890 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2891
9f6c9258 2892 /* Release IRQs */
d6214d7a
DK
2893 bnx2x_free_irq(bp);
2894load_error2:
ad5afc89 2895 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2896 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2897 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2898 }
2899
2900 bp->port.pmf = 0;
9f6c9258
DK
2901load_error1:
2902 bnx2x_napi_disable(bp);
722c6f58 2903 bnx2x_del_all_napi(bp);
ad5afc89 2904
889b9af3 2905 /* clear pf_load status, as it was already set */
ad5afc89
AE
2906 if (IS_PF(bp))
2907 bnx2x_clear_pf_load(bp);
d6214d7a 2908load_error0:
ad5afc89 2909 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2910 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2911 bnx2x_free_mem(bp);
2912
2913 return rc;
619c5cb6 2914#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2915}
2916
7fa6f340 2917int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2918{
2919 u8 rc = 0, cos, i;
2920
2921 /* Wait until tx fastpath tasks complete */
2922 for_each_tx_queue(bp, i) {
2923 struct bnx2x_fastpath *fp = &bp->fp[i];
2924
2925 for_each_cos_in_tx_queue(fp, cos)
2926 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2927 if (rc)
2928 return rc;
2929 }
2930 return 0;
2931}
2932
9f6c9258 2933/* must be called with rtnl_lock */
5d07d868 2934int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2935{
2936 int i;
c9ee9206
VZ
2937 bool global = false;
2938
55c11941
MS
2939 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2940
9ce392d4 2941 /* mark driver is unloaded in shmem2 */
ad5afc89 2942 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2943 u32 val;
2944 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2945 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2946 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2947 }
2948
80bfe5cc 2949 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2950 (bp->state == BNX2X_STATE_CLOSED ||
2951 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2952 /* We can get here if the driver has been unloaded
2953 * during parity error recovery and is either waiting for a
2954 * leader to complete or for other functions to unload and
2955 * then ifdown has been issued. In this case we want to
 2956 * unload and let other functions complete a recovery
2957 * process.
2958 */
9f6c9258
DK
2959 bp->recovery_state = BNX2X_RECOVERY_DONE;
2960 bp->is_leader = 0;
c9ee9206
VZ
2961 bnx2x_release_leader_lock(bp);
2962 smp_mb();
2963
51c1a580
MS
2964 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2965 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2966 return -EINVAL;
2967 }
2968
80bfe5cc 2969 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2970 * has not completed successfully - all resources are released.
80bfe5cc
YM
2971 *
2972 * we can get here only after unsuccessful ndo_* callback, during which
2973 * dev->IFF_UP flag is still on.
2974 */
2975 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2976 return 0;
2977
2978 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2979 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2980 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2981 */
2982 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2983 smp_mb();
2984
78c3bcc5
AE
2985 /* indicate to VFs that the PF is going down */
2986 bnx2x_iov_channel_down(bp);
2987
55c11941
MS
2988 if (CNIC_LOADED(bp))
2989 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2990
9505ee37
VZ
2991 /* Stop Tx */
2992 bnx2x_tx_disable(bp);
65565884 2993 netdev_reset_tc(bp->dev);
9505ee37 2994
9f6c9258 2995 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2996
9f6c9258 2997 del_timer_sync(&bp->timer);
f85582f8 2998
ad5afc89
AE
2999 if (IS_PF(bp)) {
3000 /* Set ALWAYS_ALIVE bit in shmem */
3001 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3002 bnx2x_drv_pulse(bp);
3003 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3004 bnx2x_save_statistics(bp);
3005 }
9f6c9258 3006
ad5afc89
AE
3007 /* wait till consumers catch up with producers in all queues */
3008 bnx2x_drain_tx_queues(bp);
9f6c9258 3009
9b176b6b
AE
 3010 /* if VF, indicate to PF that this function is going down (PF will delete sp
3011 * elements and clear initializations
3012 */
3013 if (IS_VF(bp))
3014 bnx2x_vfpf_close_vf(bp);
3015 else if (unload_mode != UNLOAD_RECOVERY)
3016 /* if this is a normal/close unload need to clean up chip*/
5d07d868 3017 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 3018 else {
c9ee9206
VZ
3019 /* Send the UNLOAD_REQUEST to the MCP */
3020 bnx2x_send_unload_req(bp, unload_mode);
3021
16a5fd92 3022 /* Prevent transactions to host from the functions on the
c9ee9206 3023 * engine that doesn't reset global blocks in case of global
16a5fd92 3024 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 3025 * (the engine whose leader will perform the recovery
3026 * last).
3027 */
3028 if (!CHIP_IS_E1x(bp))
3029 bnx2x_pf_disable(bp);
3030
3031 /* Disable HW interrupts, NAPI */
523224a3 3032 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3033 /* Delete all NAPI objects */
3034 bnx2x_del_all_napi(bp);
55c11941
MS
3035 if (CNIC_LOADED(bp))
3036 bnx2x_del_all_napi_cnic(bp);
523224a3 3037 /* Release IRQs */
d6214d7a 3038 bnx2x_free_irq(bp);
c9ee9206
VZ
3039
3040 /* Report UNLOAD_DONE to MCP */
5d07d868 3041 bnx2x_send_unload_done(bp, false);
523224a3 3042 }
9f6c9258 3043
619c5cb6 3044 /*
16a5fd92 3045 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3046 * the queueable objects here in case they failed to get cleaned so far.
3047 */
ad5afc89
AE
3048 if (IS_PF(bp))
3049 bnx2x_squeeze_objects(bp);
619c5cb6 3050
79616895
VZ
3051 /* There should be no more pending SP commands at this stage */
3052 bp->sp_state = 0;
3053
9f6c9258
DK
3054 bp->port.pmf = 0;
3055
a0d307b2
DK
3056 /* clear pending work in rtnl task */
3057 bp->sp_rtnl_state = 0;
3058 smp_mb();
3059
9f6c9258
DK
3060 /* Free SKBs, SGEs, TPA pool and driver internals */
3061 bnx2x_free_skbs(bp);
55c11941
MS
3062 if (CNIC_LOADED(bp))
3063 bnx2x_free_skbs_cnic(bp);
ec6ba945 3064 for_each_rx_queue(bp, i)
9f6c9258 3065 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3066
ad5afc89
AE
3067 bnx2x_free_fp_mem(bp);
3068 if (CNIC_LOADED(bp))
55c11941 3069 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3070
ad5afc89 3071 if (IS_PF(bp)) {
ad5afc89
AE
3072 if (CNIC_LOADED(bp))
3073 bnx2x_free_mem_cnic(bp);
3074 }
b4cddbd6
AE
3075 bnx2x_free_mem(bp);
3076
9f6c9258 3077 bp->state = BNX2X_STATE_CLOSED;
55c11941 3078 bp->cnic_loaded = false;
9f6c9258 3079
42f8277f
YM
3080 /* Clear driver version indication in shmem */
3081 if (IS_PF(bp))
3082 bnx2x_update_mng_version(bp);
3083
c9ee9206
VZ
3084 /* Check if there are pending parity attentions. If there are - set
3085 * RECOVERY_IN_PROGRESS.
3086 */
ad5afc89 3087 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3088 bnx2x_set_reset_in_progress(bp);
3089
3090 /* Set RESET_IS_GLOBAL if needed */
3091 if (global)
3092 bnx2x_set_reset_global(bp);
3093 }
3094
9f6c9258
DK
3095 /* The last driver must disable a "close the gate" if there is no
3096 * parity attention or "process kill" pending.
3097 */
ad5afc89
AE
3098 if (IS_PF(bp) &&
3099 !bnx2x_clear_pf_load(bp) &&
3100 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3101 bnx2x_disable_close_the_gate(bp);
3102
55c11941
MS
3103 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3104
9f6c9258
DK
3105 return 0;
3106}
f85582f8 3107
9f6c9258
DK
3108int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3109{
3110 u16 pmcsr;
3111
adf5f6a1 3112 /* If there is no power capability, silently succeed */
29ed74c3 3113 if (!bp->pdev->pm_cap) {
51c1a580 3114 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3115 return 0;
3116 }
3117
29ed74c3 3118 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3119
3120 switch (state) {
3121 case PCI_D0:
29ed74c3 3122 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3123 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3124 PCI_PM_CTRL_PME_STATUS));
3125
3126 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3127 /* delay required during transition out of D3hot */
3128 msleep(20);
3129 break;
3130
3131 case PCI_D3hot:
 3132 /* If there are other clients above, don't
3133 shut down the power */
3134 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3135 return 0;
3136 /* Don't shut down the power for emulation and FPGA */
3137 if (CHIP_REV_IS_SLOW(bp))
3138 return 0;
3139
3140 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3141 pmcsr |= 3;
3142
3143 if (bp->wol)
3144 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3145
29ed74c3 3146 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3147 pmcsr);
3148
3149 /* No more memory access after this point until
3150 * device is brought back to D0.
3151 */
3152 break;
3153
3154 default:
51c1a580 3155 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3156 return -EINVAL;
3157 }
3158 return 0;
3159}
3160
9f6c9258
DK
3161/*
3162 * net_device service functions
3163 */
a8f47eb7 3164static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3165{
3166 int work_done = 0;
6383c0b3 3167 u8 cos;
9f6c9258
DK
3168 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3169 napi);
3170 struct bnx2x *bp = fp->bp;
3171
3172 while (1) {
3173#ifdef BNX2X_STOP_ON_ERROR
3174 if (unlikely(bp->panic)) {
3175 napi_complete(napi);
3176 return 0;
3177 }
3178#endif
8f20aa57 3179 if (!bnx2x_fp_lock_napi(fp))
24e579c8 3180 return budget;
9f6c9258 3181
6383c0b3 3182 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3183 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3184 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3185
9f6c9258
DK
3186 if (bnx2x_has_rx_work(fp)) {
3187 work_done += bnx2x_rx_int(fp, budget - work_done);
3188
3189 /* must not complete if we consumed full budget */
8f20aa57
DK
3190 if (work_done >= budget) {
3191 bnx2x_fp_unlock_napi(fp);
9f6c9258 3192 break;
8f20aa57 3193 }
9f6c9258
DK
3194 }
3195
074975d0
ED
3196 bnx2x_fp_unlock_napi(fp);
3197
9f6c9258 3198 /* Fall out from the NAPI loop if needed */
074975d0 3199 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3200
ec6ba945
VZ
3201 /* No need to update SB for FCoE L2 ring as long as
3202 * it's connected to the default SB and the SB
3203 * has been updated when NAPI was scheduled.
3204 */
3205 if (IS_FCOE_FP(fp)) {
3206 napi_complete(napi);
3207 break;
3208 }
9f6c9258 3209 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3210 /* bnx2x_has_rx_work() reads the status block,
3211 * thus we need to ensure that status block indices
3212 * have been actually read (bnx2x_update_fpsb_idx)
3213 * prior to this check (bnx2x_has_rx_work) so that
3214 * we won't write the "newer" value of the status block
3215 * to IGU (if there was a DMA right after
3216 * bnx2x_has_rx_work and if there is no rmb, the memory
3217 * reading (bnx2x_update_fpsb_idx) may be postponed
3218 * to right before bnx2x_ack_sb). In this case there
3219 * will never be another interrupt until there is
3220 * another update of the status block, while there
3221 * is still unhandled work.
3222 */
9f6c9258
DK
3223 rmb();
3224
3225 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3226 napi_complete(napi);
3227 /* Re-enable interrupts */
51c1a580 3228 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3229 "Update index to %d\n", fp->fp_hc_idx);
3230 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3231 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3232 IGU_INT_ENABLE, 1);
3233 break;
3234 }
3235 }
3236 }
3237
3238 return work_done;
3239}
3240
e0d1095a 3241#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3242/* must be called with local_bh_disable()d */
3243int bnx2x_low_latency_recv(struct napi_struct *napi)
3244{
3245 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3246 napi);
3247 struct bnx2x *bp = fp->bp;
3248 int found = 0;
3249
3250 if ((bp->state == BNX2X_STATE_CLOSED) ||
3251 (bp->state == BNX2X_STATE_ERROR) ||
f8dcb5e3 3252 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
8f20aa57
DK
3253 return LL_FLUSH_FAILED;
3254
3255 if (!bnx2x_fp_lock_poll(fp))
3256 return LL_FLUSH_BUSY;
3257
75b29459 3258 if (bnx2x_has_rx_work(fp))
8f20aa57 3259 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3260
3261 bnx2x_fp_unlock_poll(fp);
3262
3263 return found;
3264}
3265#endif
3266
9f6c9258
DK
3267/* we split the first BD into headers and data BDs
3268 * to ease the pain of our fellow microcode engineers
3269 * we use one mapping for both BDs
9f6c9258 3270 */
91226790
DK
3271static u16 bnx2x_tx_split(struct bnx2x *bp,
3272 struct bnx2x_fp_txdata *txdata,
3273 struct sw_tx_bd *tx_buf,
3274 struct eth_tx_start_bd **tx_bd, u16 hlen,
3275 u16 bd_prod)
9f6c9258
DK
3276{
3277 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3278 struct eth_tx_bd *d_tx_bd;
3279 dma_addr_t mapping;
3280 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3281
3282 /* first fix first BD */
9f6c9258
DK
3283 h_tx_bd->nbytes = cpu_to_le16(hlen);
3284
91226790
DK
3285 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3286 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3287
3288 /* now get a new data BD
3289 * (after the pbd) and fill it */
3290 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3291 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3292
3293 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3294 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3295
3296 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3297 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3298 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3299
3300 /* this marks the BD as one that has no individual mapping */
3301 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3302
3303 DP(NETIF_MSG_TX_QUEUED,
3304 "TSO split data size is %d (%x:%x)\n",
3305 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3306
3307 /* update tx_bd */
3308 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3309
3310 return bd_prod;
3311}
3312
86564c3f
YM
3313#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3314#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3315static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3316{
86564c3f
YM
3317 __sum16 tsum = (__force __sum16) csum;
3318
9f6c9258 3319 if (fix > 0)
86564c3f
YM
3320 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3321 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3322
3323 else if (fix < 0)
86564c3f
YM
3324 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3325 csum_partial(t_header, -fix, 0)));
9f6c9258 3326
e2593fcd 3327 return bswab16(tsum);
9f6c9258
DK
3328}
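/* Illustrative note, not part of the original driver: a positive fix removes
 * the ones'-complement contribution of the 'fix' bytes immediately preceding
 * the transport header from the caller's checksum, while a negative fix adds
 * the first -fix bytes of the transport header back in; either way the
 * result is folded and byte-swapped for the BD.
 */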
3329
91226790 3330static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3331{
3332 u32 rc;
a848ade4
DK
3333 __u8 prot = 0;
3334 __be16 protocol;
9f6c9258
DK
3335
3336 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3337 return XMIT_PLAIN;
9f6c9258 3338
a848ade4
DK
3339 protocol = vlan_get_protocol(skb);
3340 if (protocol == htons(ETH_P_IPV6)) {
3341 rc = XMIT_CSUM_V6;
3342 prot = ipv6_hdr(skb)->nexthdr;
3343 } else {
3344 rc = XMIT_CSUM_V4;
3345 prot = ip_hdr(skb)->protocol;
3346 }
9f6c9258 3347
a848ade4
DK
3348 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3349 if (inner_ip_hdr(skb)->version == 6) {
3350 rc |= XMIT_CSUM_ENC_V6;
3351 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3352 rc |= XMIT_CSUM_TCP;
9f6c9258 3353 } else {
a848ade4
DK
3354 rc |= XMIT_CSUM_ENC_V4;
3355 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3356 rc |= XMIT_CSUM_TCP;
3357 }
3358 }
a848ade4
DK
3359 if (prot == IPPROTO_TCP)
3360 rc |= XMIT_CSUM_TCP;
9f6c9258 3361
36a8f39e
ED
3362 if (skb_is_gso(skb)) {
3363 if (skb_is_gso_v6(skb)) {
3364 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3365 if (rc & XMIT_CSUM_ENC)
3366 rc |= XMIT_GSO_ENC_V6;
3367 } else {
3368 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3369 if (rc & XMIT_CSUM_ENC)
3370 rc |= XMIT_GSO_ENC_V4;
3371 }
a848ade4 3372 }
9f6c9258
DK
3373
3374 return rc;
3375}
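/* Illustrative example, not part of the original driver: a plain TSO
 * TCP/IPv4 skb with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while an skb with
 * skb->encapsulation set additionally picks up the _ENC_ variants on
 * non-E1x chips.
 */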
3376
3377#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3378/* check if packet requires linearization (packet is too fragmented)
3379 no need to check fragmentation if page size > 8K (there will be no
3380 violation to FW restrictions) */
3381static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3382 u32 xmit_type)
3383{
3384 int to_copy = 0;
3385 int hlen = 0;
3386 int first_bd_sz = 0;
3387
3388 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3389 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3390
3391 if (xmit_type & XMIT_GSO) {
3392 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3393 /* Check if LSO packet needs to be copied:
3394 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3395 int wnd_size = MAX_FETCH_BD - 3;
3396 /* Number of windows to check */
3397 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3398 int wnd_idx = 0;
3399 int frag_idx = 0;
3400 u32 wnd_sum = 0;
3401
3402 /* Headers length */
3403 hlen = (int)(skb_transport_header(skb) - skb->data) +
3404 tcp_hdrlen(skb);
3405
3406 /* Amount of data (w/o headers) on linear part of SKB*/
3407 first_bd_sz = skb_headlen(skb) - hlen;
3408
3409 wnd_sum = first_bd_sz;
3410
3411 /* Calculate the first sum - it's special */
3412 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3413 wnd_sum +=
9e903e08 3414 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3415
3416 /* If there was data on linear skb data - check it */
3417 if (first_bd_sz > 0) {
3418 if (unlikely(wnd_sum < lso_mss)) {
3419 to_copy = 1;
3420 goto exit_lbl;
3421 }
3422
3423 wnd_sum -= first_bd_sz;
3424 }
3425
3426 /* Others are easier: run through the frag list and
3427 check all windows */
3428 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3429 wnd_sum +=
9e903e08 3430 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3431
3432 if (unlikely(wnd_sum < lso_mss)) {
3433 to_copy = 1;
3434 break;
3435 }
3436 wnd_sum -=
9e903e08 3437 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3438 }
3439 } else {
 3440 /* a non-LSO packet that is too fragmented should always
 3441 be linearized */
3442 to_copy = 1;
3443 }
3444 }
3445
3446exit_lbl:
3447 if (unlikely(to_copy))
3448 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3449 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3450 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3451 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3452
3453 return to_copy;
3454}
3455#endif
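
/* In bnx2x_pkt_req_lin() above, a window of (MAX_FETCH_BD - 3) consecutive
 * BDs is slid across the frag list; if the bytes covered by any window add
 * up to less than the LSO MSS, the packet violates the FW fragmentation
 * restriction and the skb has to be linearized first.
 */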
3456
f2e0899f 3457/**
e8920674 3458 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3459 *
e8920674
DK
3460 * @skb: packet skb
3461 * @pbd: parse BD
3462 * @xmit_type: xmit flags
f2e0899f 3463 */
91226790
DK
3464static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3465 struct eth_tx_parse_bd_e1x *pbd,
3466 u32 xmit_type)
f2e0899f
DK
3467{
3468 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3469 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3470 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3471
3472 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3473 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3474 pbd->tcp_pseudo_csum =
86564c3f
YM
3475 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3476 ip_hdr(skb)->daddr,
3477 0, IPPROTO_TCP, 0));
057cf65e 3478 } else {
f2e0899f 3479 pbd->tcp_pseudo_csum =
86564c3f
YM
3480 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3481 &ipv6_hdr(skb)->daddr,
3482 0, IPPROTO_TCP, 0));
057cf65e 3483 }
f2e0899f 3484
86564c3f
YM
3485 pbd->global_data |=
3486 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3487}
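
/* In the GSO case above the pseudo-header checksum is computed with a zero
 * length and the PSEUDO_CS_WITHOUT_LEN flag is set, presumably so the
 * device can add each segment's length on its own.
 */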
f85582f8 3488
a848ade4
DK
3489/**
3490 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3491 *
3492 * @bp: driver handle
3493 * @skb: packet skb
3494 * @parsing_data: data to be updated
3495 * @xmit_type: xmit flags
3496 *
3497 * 57712/578xx related, when skb has encapsulation
3498 */
3499static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3500 u32 *parsing_data, u32 xmit_type)
3501{
3502 *parsing_data |=
3503 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3504 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3505 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3506
3507 if (xmit_type & XMIT_CSUM_TCP) {
3508 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3509 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3510 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3511
3512 return skb_inner_transport_header(skb) +
3513 inner_tcp_hdrlen(skb) - skb->data;
3514 }
3515
3516 /* We support checksum offload for TCP and UDP only.
3517 * No need to pass the UDP header length - it's a constant.
3518 */
3519 return skb_inner_transport_header(skb) +
3520 sizeof(struct udphdr) - skb->data;
3521}
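
/* Offsets written to the E2 parsing BD above are in 16-bit words (hence
 * the '>> 1') and the TCP header length is in 32-bit dwords (hence the
 * '/ 4'); the value returned to the caller is the full header length in
 * bytes, up to the end of the inner L4 header.
 */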
3522
f2e0899f 3523/**
e8920674 3524 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3525 *
e8920674
DK
3526 * @bp: driver handle
3527 * @skb: packet skb
3528 * @parsing_data: data to be updated
3529 * @xmit_type: xmit flags
f2e0899f 3530 *
91226790 3531 * 57712/578xx related
f2e0899f 3532 */
91226790
DK
3533static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3534 u32 *parsing_data, u32 xmit_type)
f2e0899f 3535{
e39aece7 3536 *parsing_data |=
2de67439 3537 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3538 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3539 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3540
e39aece7
VZ
3541 if (xmit_type & XMIT_CSUM_TCP) {
3542 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3543 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3544 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3545
e39aece7 3546 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3547 }
3548 /* We support checksum offload for TCP and UDP only.
3549 * No need to pass the UDP header length - it's a constant.
3550 */
3551 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3552}
3553
a848ade4 3554/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3555static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3556 struct eth_tx_start_bd *tx_start_bd,
3557 u32 xmit_type)
93ef5c02 3558{
93ef5c02
DK
3559 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3560
a848ade4 3561 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3562 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3563
3564 if (!(xmit_type & XMIT_CSUM_TCP))
3565 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3566}
3567
f2e0899f 3568/**
e8920674 3569 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3570 *
e8920674
DK
3571 * @bp: driver handle
3572 * @skb: packet skb
3573 * @pbd: parse BD to be updated
3574 * @xmit_type: xmit flags
f2e0899f 3575 */
91226790
DK
3576static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3577 struct eth_tx_parse_bd_e1x *pbd,
3578 u32 xmit_type)
f2e0899f 3579{
e39aece7 3580 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3581
3582 /* for now NS flag is not used in Linux */
3583 pbd->global_data =
86564c3f
YM
3584 cpu_to_le16(hlen |
3585 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3586 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3587
3588 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3589 skb_network_header(skb)) >> 1;
f2e0899f 3590
e39aece7
VZ
3591 hlen += pbd->ip_hlen_w;
3592
3593 /* We support checksum offload for TCP and UDP only */
3594 if (xmit_type & XMIT_CSUM_TCP)
3595 hlen += tcp_hdrlen(skb) / 2;
3596 else
3597 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3598
3599 pbd->total_hlen_w = cpu_to_le16(hlen);
3600 hlen = hlen*2;
3601
3602 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3603 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3604
3605 } else {
3606 s8 fix = SKB_CS_OFF(skb); /* signed! */
3607
3608 DP(NETIF_MSG_TX_QUEUED,
3609 "hlen %d fix %d csum before fix %x\n",
3610 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3611
3612 /* HW bug: fixup the CSUM */
3613 pbd->tcp_pseudo_csum =
3614 bnx2x_csum_fix(skb_transport_header(skb),
3615 SKB_CS(skb), fix);
3616
3617 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3618 pbd->tcp_pseudo_csum);
3619 }
3620
3621 return hlen;
3622}
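
/* In the E1x helper above 'hlen' is accumulated in 16-bit words and
 * converted back to bytes (hlen*2) before being returned; the non-TCP
 * branch runs the checksum through bnx2x_csum_fix() to work around the
 * HW bug noted in the code.
 */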
f85582f8 3623
a848ade4
DK
3624static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3625 struct eth_tx_parse_bd_e2 *pbd_e2,
3626 struct eth_tx_parse_2nd_bd *pbd2,
3627 u16 *global_data,
3628 u32 xmit_type)
3629{
e287a75c 3630 u16 hlen_w = 0;
a848ade4 3631 u8 outerip_off, outerip_len = 0;
e768fb29 3632
e287a75c
DK
3633 /* from outer IP to transport */
3634 hlen_w = (skb_inner_transport_header(skb) -
3635 skb_network_header(skb)) >> 1;
a848ade4
DK
3636
3637 /* transport len */
e768fb29 3638 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3639
e287a75c 3640 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3641
e768fb29
DK
3642 /* outer IP header info */
3643 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3644 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3645 u32 csum = (__force u32)(~iph->check) -
3646 (__force u32)iph->tot_len -
3647 (__force u32)iph->frag_off;
c957d09f 3648
e42780b6
DK
3649 outerip_len = iph->ihl << 1;
3650
a848ade4 3651 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3652 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3653 } else {
3654 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3655 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
e42780b6 3656 pbd_e2->data.tunnel_data.flags |=
05f8461b 3657 ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
a848ade4
DK
3658 }
3659
3660 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3661
3662 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3663
e42780b6
DK
3664 /* inner IP header info */
3665 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3666 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3667
3668 pbd_e2->data.tunnel_data.pseudo_csum =
3669 bswab16(~csum_tcpudp_magic(
3670 inner_ip_hdr(skb)->saddr,
3671 inner_ip_hdr(skb)->daddr,
3672 0, IPPROTO_TCP, 0));
a848ade4
DK
3673 } else {
3674 pbd_e2->data.tunnel_data.pseudo_csum =
3675 bswab16(~csum_ipv6_magic(
3676 &inner_ipv6_hdr(skb)->saddr,
3677 &inner_ipv6_hdr(skb)->daddr,
3678 0, IPPROTO_TCP, 0));
3679 }
3680
3681 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3682
3683 *global_data |=
3684 outerip_off |
a848ade4
DK
3685 (outerip_len <<
3686 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3687 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3688 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3689
3690 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3691 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3692 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3693 }
a848ade4
DK
3694}
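
/* For tunneled GSO above, the outer IPv4 checksum is folded with tot_len
 * and frag_off subtracted out (the 'wo_len_flags_frag' field), which
 * presumably lets the FW patch those per-segment fields itself; when the
 * outer protocol is UDP the tunnel-UDP flag and UDP header offset are set
 * as well.
 */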
3695
e42780b6
DK
3696static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3697 u32 xmit_type)
3698{
3699 struct ipv6hdr *ipv6;
3700
3701 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3702 return;
3703
3704 if (xmit_type & XMIT_GSO_ENC_V6)
3705 ipv6 = inner_ipv6_hdr(skb);
3706 else /* XMIT_GSO_V6 */
3707 ipv6 = ipv6_hdr(skb);
3708
3709 if (ipv6->nexthdr == NEXTHDR_IPV6)
3710 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3711}
3712
9f6c9258
DK
3713/* called with netif_tx_lock
3714 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3715 * netif_wake_queue()
3716 */
3717netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3718{
3719 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3720
9f6c9258 3721 struct netdev_queue *txq;
6383c0b3 3722 struct bnx2x_fp_txdata *txdata;
9f6c9258 3723 struct sw_tx_bd *tx_buf;
619c5cb6 3724 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3725 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3726 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3727 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3728 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3729 u32 pbd_e2_parsing_data = 0;
9f6c9258 3730 u16 pkt_prod, bd_prod;
65565884 3731 int nbd, txq_index;
9f6c9258
DK
3732 dma_addr_t mapping;
3733 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3734 int i;
3735 u8 hlen = 0;
3736 __le16 pkt_size = 0;
3737 struct ethhdr *eth;
3738 u8 mac_type = UNICAST_ADDRESS;
3739
3740#ifdef BNX2X_STOP_ON_ERROR
3741 if (unlikely(bp->panic))
3742 return NETDEV_TX_BUSY;
3743#endif
3744
6383c0b3
AE
3745 txq_index = skb_get_queue_mapping(skb);
3746 txq = netdev_get_tx_queue(dev, txq_index);
3747
55c11941 3748 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3749
65565884 3750 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3751
3752 /* enable this debug print to view the transmission queue being used
51c1a580 3753 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3754 txq_index, fp_index, txdata_index); */
9f6c9258 3755
16a5fd92 3756 /* enable this debug print to view the transmission details
51c1a580
MS
3757 DP(NETIF_MSG_TX_QUEUED,
3758 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3759 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3760
6383c0b3 3761 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3762 skb_shinfo(skb)->nr_frags +
3763 BDS_PER_TX_PKT +
3764 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3765 /* Handle special storage cases separately */
c96bdc0c
DK
3766 if (txdata->tx_ring_size == 0) {
3767 struct bnx2x_eth_q_stats *q_stats =
3768 bnx2x_fp_qstats(bp, txdata->parent_fp);
3769 q_stats->driver_filtered_tx_pkt++;
3770 dev_kfree_skb(skb);
3771 return NETDEV_TX_OK;
3772 }
2de67439
YM
3773 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3774 netif_tx_stop_queue(txq);
c96bdc0c 3775 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3776
9f6c9258
DK
3777 return NETDEV_TX_BUSY;
3778 }
3779
51c1a580 3780 DP(NETIF_MSG_TX_QUEUED,
04c46736 3781 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3782 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3783 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3784 skb->len);
9f6c9258
DK
3785
3786 eth = (struct ethhdr *)skb->data;
3787
3788 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3789 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3790 if (is_broadcast_ether_addr(eth->h_dest))
3791 mac_type = BROADCAST_ADDRESS;
3792 else
3793 mac_type = MULTICAST_ADDRESS;
3794 }
3795
91226790 3796#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3797 /* First, check if we need to linearize the skb (due to FW
3798 restrictions). No need to check fragmentation if page size > 8K
 3799 (there will be no violation of FW restrictions) */
3800 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3801 /* Statistics of linearization */
3802 bp->lin_cnt++;
3803 if (skb_linearize(skb) != 0) {
51c1a580
MS
3804 DP(NETIF_MSG_TX_QUEUED,
3805 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3806 dev_kfree_skb_any(skb);
3807 return NETDEV_TX_OK;
3808 }
3809 }
3810#endif
619c5cb6
VZ
3811 /* Map skb linear data for DMA */
3812 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3813 skb_headlen(skb), DMA_TO_DEVICE);
3814 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3815 DP(NETIF_MSG_TX_QUEUED,
3816 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3817 dev_kfree_skb_any(skb);
3818 return NETDEV_TX_OK;
3819 }
9f6c9258
DK
3820 /*
3821 Please read carefully. First we use one BD which we mark as start,
3822 then we have a parsing info BD (used for TSO or xsum),
3823 and only then we have the rest of the TSO BDs.
3824 (don't forget to mark the last one as last,
3825 and to unmap only AFTER you write to the BD ...)
 3826 And above all, all pbd sizes are in words - NOT DWORDS!
3827 */
3828
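	/* The chain built below is: start BD, parsing BD (e1x or e2), an
	 * optional 2nd parsing BD for encapsulated packets, an optional
	 * header BD created by bnx2x_tx_split() for TSO, and one data BD
	 * per frag; 'nbd' counts them and is written back into
	 * first_bd->nbd once the frags are mapped (or before freeing on a
	 * mapping error).
	 */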
619c5cb6
VZ
3829 /* get current pkt produced now - advance it just before sending packet
3830 * since mapping of pages may fail and cause packet to be dropped
3831 */
6383c0b3
AE
3832 pkt_prod = txdata->tx_pkt_prod;
3833 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3834
619c5cb6
VZ
3835 /* get a tx_buf and first BD
3836 * tx_start_bd may be changed during SPLIT,
3837 * but first_bd will always stay first
3838 */
6383c0b3
AE
3839 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3840 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3841 first_bd = tx_start_bd;
9f6c9258
DK
3842
3843 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3844
eeed018c
MK
3845 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3846 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3847 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3848 } else if (bp->ptp_tx_skb) {
3849 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3850 } else {
3851 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3852 /* schedule check for Tx timestamp */
3853 bp->ptp_tx_skb = skb_get(skb);
3854 bp->ptp_tx_start = jiffies;
3855 schedule_work(&bp->ptp_task);
3856 }
3857 }
3858
91226790
DK
3859 /* header nbd: indirectly zero other flags! */
3860 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3861
3862 /* remember the first BD of the packet */
6383c0b3 3863 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3864 tx_buf->skb = skb;
3865 tx_buf->flags = 0;
3866
3867 DP(NETIF_MSG_TX_QUEUED,
3868 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3869 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3870
df8a39de 3871 if (skb_vlan_tag_present(skb)) {
523224a3 3872 tx_start_bd->vlan_or_ethertype =
df8a39de 3873 cpu_to_le16(skb_vlan_tag_get(skb));
523224a3
DK
3874 tx_start_bd->bd_flags.as_bitfield |=
3875 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3876 } else {
3877 /* when transmitting in a vf, start bd must hold the ethertype
3878 * for fw to enforce it
3879 */
ea36475a 3880#ifndef BNX2X_STOP_ON_ERROR
91226790 3881 if (IS_VF(bp))
ea36475a 3882#endif
dc1ba591
AE
3883 tx_start_bd->vlan_or_ethertype =
3884 cpu_to_le16(ntohs(eth->h_proto));
ea36475a 3885#ifndef BNX2X_STOP_ON_ERROR
91226790 3886 else
dc1ba591
AE
3887 /* used by FW for packet accounting */
3888 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
ea36475a 3889#endif
dc1ba591 3890 }
9f6c9258 3891
91226790
DK
3892 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3893
9f6c9258
DK
3894 /* turn on parsing and get a BD */
3895 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3896
93ef5c02
DK
3897 if (xmit_type & XMIT_CSUM)
3898 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3899
619c5cb6 3900 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3901 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3902 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3903
3904 if (xmit_type & XMIT_CSUM_ENC) {
3905 u16 global_data = 0;
3906
3907 /* Set PBD in enc checksum offload case */
3908 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3909 &pbd_e2_parsing_data,
3910 xmit_type);
3911
3912 /* turn on 2nd parsing and get a BD */
3913 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3914
3915 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3916
3917 memset(pbd2, 0, sizeof(*pbd2));
3918
3919 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3920 (skb_inner_network_header(skb) -
3921 skb->data) >> 1;
3922
3923 if (xmit_type & XMIT_GSO_ENC)
3924 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3925 &global_data,
3926 xmit_type);
3927
3928 pbd2->global_data = cpu_to_le16(global_data);
3929
 3930 /* add an additional parsing BD indication to the start BD */
3931 SET_FLAG(tx_start_bd->general_data,
3932 ETH_TX_START_BD_PARSE_NBDS, 1);
3933 /* set encapsulation flag in start BD */
3934 SET_FLAG(tx_start_bd->general_data,
3935 ETH_TX_START_BD_TUNNEL_EXIST, 1);
fe26566d
DK
3936
3937 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3938
a848ade4
DK
3939 nbd++;
3940 } else if (xmit_type & XMIT_CSUM) {
91226790 3941 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3942 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3943 &pbd_e2_parsing_data,
3944 xmit_type);
a848ade4 3945 }
dc1ba591 3946
e42780b6 3947 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
babe723d
YM
3948 /* Add the macs to the parsing BD if this is a vf or if
3949 * Tx Switching is enabled.
3950 */
91226790
DK
3951 if (IS_VF(bp)) {
3952 /* override GRE parameters in BD */
3953 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3954 &pbd_e2->data.mac_addr.src_mid,
3955 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3956 eth->h_source);
91226790 3957
babe723d
YM
3958 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3959 &pbd_e2->data.mac_addr.dst_mid,
3960 &pbd_e2->data.mac_addr.dst_lo,
3961 eth->h_dest);
ea36475a
YM
3962 } else {
3963 if (bp->flags & TX_SWITCHING)
3964 bnx2x_set_fw_mac_addr(
3965 &pbd_e2->data.mac_addr.dst_hi,
3966 &pbd_e2->data.mac_addr.dst_mid,
3967 &pbd_e2->data.mac_addr.dst_lo,
3968 eth->h_dest);
3969#ifdef BNX2X_STOP_ON_ERROR
3970 /* Enforce security is always set in Stop on Error -
3971 * source mac should be present in the parsing BD
3972 */
3973 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3974 &pbd_e2->data.mac_addr.src_mid,
3975 &pbd_e2->data.mac_addr.src_lo,
3976 eth->h_source);
3977#endif
619c5cb6 3978 }
96bed4b9
YM
3979
3980 SET_FLAG(pbd_e2_parsing_data,
3981 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3982 } else {
96bed4b9 3983 u16 global_data = 0;
6383c0b3 3984 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3985 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3986 /* Set PBD in checksum offload case */
3987 if (xmit_type & XMIT_CSUM)
3988 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3989
96bed4b9
YM
3990 SET_FLAG(global_data,
3991 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3992 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3993 }
3994
f85582f8 3995 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3996 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3997 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3998 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3999 pkt_size = tx_start_bd->nbytes;
4000
51c1a580 4001 DP(NETIF_MSG_TX_QUEUED,
91226790 4002 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 4003 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 4004 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
4005 tx_start_bd->bd_flags.as_bitfield,
4006 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
4007
4008 if (xmit_type & XMIT_GSO) {
4009
4010 DP(NETIF_MSG_TX_QUEUED,
4011 "TSO packet len %d hlen %d total len %d tso size %d\n",
4012 skb->len, hlen, skb_headlen(skb),
4013 skb_shinfo(skb)->gso_size);
4014
4015 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4016
91226790
DK
4017 if (unlikely(skb_headlen(skb) > hlen)) {
4018 nbd++;
6383c0b3
AE
4019 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4020 &tx_start_bd, hlen,
91226790
DK
4021 bd_prod);
4022 }
619c5cb6 4023 if (!CHIP_IS_E1x(bp))
e42780b6
DK
4024 pbd_e2_parsing_data |=
4025 (skb_shinfo(skb)->gso_size <<
4026 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4027 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f 4028 else
e42780b6 4029 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 4030 }
2297a2da
VZ
4031
4032 /* Set the PBD's parsing_data field if not zero
4033 * (for the chips newer than 57711).
4034 */
4035 if (pbd_e2_parsing_data)
4036 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4037
9f6c9258
DK
4038 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4039
f85582f8 4040 /* Handle fragmented skb */
9f6c9258
DK
4041 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4042 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4043
9e903e08
ED
4044 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4045 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 4046 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 4047 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 4048
51c1a580
MS
4049 DP(NETIF_MSG_TX_QUEUED,
4050 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
4051
 4052 /* we need to unmap all buffers already mapped
 4053 * for this SKB;
 4054 * first_bd->nbd needs to be properly updated
 4055 * before the call to bnx2x_free_tx_pkt
4056 */
4057 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 4058 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
4059 TX_BD(txdata->tx_pkt_prod),
4060 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4061 return NETDEV_TX_OK;
4062 }
4063
9f6c9258 4064 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4065 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4066 if (total_pkt_bd == NULL)
6383c0b3 4067 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4068
9f6c9258
DK
4069 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4070 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4071 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4072 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4073 nbd++;
9f6c9258
DK
4074
4075 DP(NETIF_MSG_TX_QUEUED,
4076 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4077 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4078 le16_to_cpu(tx_data_bd->nbytes));
4079 }
4080
4081 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4082
619c5cb6
VZ
4083 /* update with actual num BDs */
4084 first_bd->nbd = cpu_to_le16(nbd);
4085
9f6c9258
DK
4086 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4087
4088 /* now send a tx doorbell, counting the next BD
4089 * if the packet contains or ends with it
4090 */
4091 if (TX_BD_POFF(bd_prod) < nbd)
4092 nbd++;
4093
619c5cb6
VZ
4094 /* total_pkt_bytes should be set on the first data BD if
4095 * it's not an LSO packet and there is more than one
4096 * data BD. In this case pkt_size is limited by an MTU value.
4097 * However we prefer to set it for an LSO packet (while we don't
 4098 * have to) in order to save some CPU cycles in the non-LSO
 4099 * case, which we care much more about.
4100 */
9f6c9258
DK
4101 if (total_pkt_bd != NULL)
4102 total_pkt_bd->total_pkt_bytes = pkt_size;
4103
523224a3 4104 if (pbd_e1x)
9f6c9258 4105 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4106 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4107 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4108 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4109 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4110 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4111 if (pbd_e2)
4112 DP(NETIF_MSG_TX_QUEUED,
4113 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4114 pbd_e2,
4115 pbd_e2->data.mac_addr.dst_hi,
4116 pbd_e2->data.mac_addr.dst_mid,
4117 pbd_e2->data.mac_addr.dst_lo,
4118 pbd_e2->data.mac_addr.src_hi,
4119 pbd_e2->data.mac_addr.src_mid,
4120 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4121 pbd_e2->parsing_data);
9f6c9258
DK
4122 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4123
2df1a70a
TH
4124 netdev_tx_sent_queue(txq, skb->len);
4125
8373c57d
WB
4126 skb_tx_timestamp(skb);
4127
6383c0b3 4128 txdata->tx_pkt_prod++;
9f6c9258
DK
4129 /*
4130 * Make sure that the BD data is updated before updating the producer
4131 * since FW might read the BD right after the producer is updated.
4132 * This is only applicable for weak-ordered memory model archs such
4133 * as IA-64. The following barrier is also mandatory since FW will
 4134 * assume packets must have BDs.
4135 */
4136 wmb();
4137
6383c0b3 4138 txdata->tx_db.data.prod += nbd;
9f6c9258 4139 barrier();
f85582f8 4140
6383c0b3 4141 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4142
4143 mmiowb();
4144
6383c0b3 4145 txdata->tx_bd_prod += nbd;
9f6c9258 4146
7df2dc6b 4147 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4148 netif_tx_stop_queue(txq);
4149
4150 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4151 * ordering of set_bit() in netif_tx_stop_queue() and read of
4152 * fp->bd_tx_cons */
4153 smp_mb();
4154
15192a8c 4155 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4156 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4157 netif_tx_wake_queue(txq);
4158 }
6383c0b3 4159 txdata->tx_pkt++;
9f6c9258
DK
4160
4161 return NETDEV_TX_OK;
4162}
f85582f8 4163
6383c0b3
AE
4164/**
4165 * bnx2x_setup_tc - routine to configure net_device for multi tc
4166 *
4167 * @netdev: net device to configure
4168 * @tc: number of traffic classes to enable
4169 *
4170 * callback connected to the ndo_setup_tc function pointer
4171 */
4172int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4173{
4174 int cos, prio, count, offset;
4175 struct bnx2x *bp = netdev_priv(dev);
4176
4177 /* setup tc must be called under rtnl lock */
4178 ASSERT_RTNL();
4179
16a5fd92 4180 /* no traffic classes requested. Aborting */
6383c0b3
AE
4181 if (!num_tc) {
4182 netdev_reset_tc(dev);
4183 return 0;
4184 }
4185
4186 /* requested to support too many traffic classes */
4187 if (num_tc > bp->max_cos) {
6bf07b8e 4188 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4189 num_tc, bp->max_cos);
6383c0b3
AE
4190 return -EINVAL;
4191 }
4192
4193 /* declare amount of supported traffic classes */
4194 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4195 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4196 return -EINVAL;
4197 }
4198
4199 /* configure priority to traffic class mapping */
4200 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4201 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4202 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4203 "mapping priority %d to tc %d\n",
6383c0b3
AE
4204 prio, bp->prio_to_cos[prio]);
4205 }
4206
16a5fd92 4207 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4208 This can be used for ets or pfc, and save the effort of setting
 4209 up a multi-class queue disc or negotiating DCBX with a switch
4210 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4211 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4212 for (prio = 1; prio < 16; prio++) {
4213 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4214 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4215 } */
4216
4217 /* configure traffic class to transmission queue mapping */
4218 for (cos = 0; cos < bp->max_cos; cos++) {
4219 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4220 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4221 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4222 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4223 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4224 cos, offset, count);
4225 }
4226
4227 return 0;
4228}
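
/* In bnx2x_setup_tc() above each traffic class (COS) is given a contiguous
 * block of BNX2X_NUM_ETH_QUEUES(bp) transmit queues starting at
 * cos * BNX2X_NUM_NON_CNIC_QUEUES(bp), while priorities are mapped to
 * classes through the bp->prio_to_cos[] table.
 */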
4229
9f6c9258
DK
4230/* called with rtnl_lock */
4231int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4232{
4233 struct sockaddr *addr = p;
4234 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4235 int rc = 0;
9f6c9258 4236
2e98ffc2 4237 if (!is_valid_ether_addr(addr->sa_data)) {
51c1a580 4238 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4239 return -EINVAL;
51c1a580 4240 }
614c76df 4241
2e98ffc2
DK
4242 if (IS_MF_STORAGE_ONLY(bp)) {
4243 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
9f6c9258 4244 return -EINVAL;
51c1a580 4245 }
9f6c9258 4246
619c5cb6
VZ
4247 if (netif_running(dev)) {
4248 rc = bnx2x_set_eth_mac(bp, false);
4249 if (rc)
4250 return rc;
4251 }
4252
9f6c9258 4253 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4254
523224a3 4255 if (netif_running(dev))
619c5cb6 4256 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4257
619c5cb6 4258 return rc;
9f6c9258
DK
4259}
4260
b3b83c3f
DK
4261static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4262{
4263 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4264 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4265 u8 cos;
b3b83c3f
DK
4266
4267 /* Common */
55c11941 4268
b3b83c3f
DK
4269 if (IS_FCOE_IDX(fp_index)) {
4270 memset(sb, 0, sizeof(union host_hc_status_block));
4271 fp->status_blk_mapping = 0;
b3b83c3f 4272 } else {
b3b83c3f 4273 /* status blocks */
619c5cb6 4274 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4275 BNX2X_PCI_FREE(sb->e2_sb,
4276 bnx2x_fp(bp, fp_index,
4277 status_blk_mapping),
4278 sizeof(struct host_hc_status_block_e2));
4279 else
4280 BNX2X_PCI_FREE(sb->e1x_sb,
4281 bnx2x_fp(bp, fp_index,
4282 status_blk_mapping),
4283 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4284 }
55c11941 4285
b3b83c3f
DK
4286 /* Rx */
4287 if (!skip_rx_queue(bp, fp_index)) {
4288 bnx2x_free_rx_bds(fp);
4289
4290 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4291 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4292 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4293 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4294 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4295
4296 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4297 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4298 sizeof(struct eth_fast_path_rx_cqe) *
4299 NUM_RCQ_BD);
4300
4301 /* SGE ring */
4302 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4303 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4304 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4305 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4306 }
4307
4308 /* Tx */
4309 if (!skip_tx_queue(bp, fp_index)) {
4310 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4311 for_each_cos_in_tx_queue(fp, cos) {
65565884 4312 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4313
51c1a580 4314 DP(NETIF_MSG_IFDOWN,
94f05b0f 4315 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4316 fp_index, cos, txdata->cid);
4317
4318 BNX2X_FREE(txdata->tx_buf_ring);
4319 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4320 txdata->tx_desc_mapping,
4321 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4322 }
b3b83c3f
DK
4323 }
4324 /* end of fastpath */
4325}
4326
a8f47eb7 4327static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4328{
4329 int i;
4330 for_each_cnic_queue(bp, i)
4331 bnx2x_free_fp_mem_at(bp, i);
4332}
4333
b3b83c3f
DK
4334void bnx2x_free_fp_mem(struct bnx2x *bp)
4335{
4336 int i;
55c11941 4337 for_each_eth_queue(bp, i)
b3b83c3f
DK
4338 bnx2x_free_fp_mem_at(bp, i);
4339}
4340
1191cb83 4341static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4342{
4343 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4344 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4345 bnx2x_fp(bp, index, sb_index_values) =
4346 (__le16 *)status_blk.e2_sb->sb.index_values;
4347 bnx2x_fp(bp, index, sb_running_index) =
4348 (__le16 *)status_blk.e2_sb->sb.running_index;
4349 } else {
4350 bnx2x_fp(bp, index, sb_index_values) =
4351 (__le16 *)status_blk.e1x_sb->sb.index_values;
4352 bnx2x_fp(bp, index, sb_running_index) =
4353 (__le16 *)status_blk.e1x_sb->sb.running_index;
4354 }
4355}
4356
1191cb83
ED
4357/* Returns the number of actually allocated BDs */
4358static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4359 int rx_ring_size)
4360{
4361 struct bnx2x *bp = fp->bp;
4362 u16 ring_prod, cqe_ring_prod;
4363 int i, failure_cnt = 0;
4364
4365 fp->rx_comp_cons = 0;
4366 cqe_ring_prod = ring_prod = 0;
4367
 4368 /* This routine is called only during fp init so
4369 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4370 */
4371 for (i = 0; i < rx_ring_size; i++) {
996dedba 4372 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4373 failure_cnt++;
4374 continue;
4375 }
4376 ring_prod = NEXT_RX_IDX(ring_prod);
4377 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4378 WARN_ON(ring_prod <= (i - failure_cnt));
4379 }
4380
4381 if (failure_cnt)
4382 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4383 i - failure_cnt, fp->index);
4384
4385 fp->rx_bd_prod = ring_prod;
4386 /* Limit the CQE producer by the CQE ring size */
4387 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4388 cqe_ring_prod);
4389 fp->rx_pkt = fp->rx_calls = 0;
4390
15192a8c 4391 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4392
4393 return i - failure_cnt;
4394}
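
/* Rx buffer allocation failures above are tolerated: the ring simply ends
 * up shorter and the actual count is returned, leaving it to the caller
 * (bnx2x_alloc_fp_mem_at) to decide whether the queue is still usable.
 */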
4395
4396static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4397{
4398 int i;
4399
4400 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4401 struct eth_rx_cqe_next_page *nextpg;
4402
4403 nextpg = (struct eth_rx_cqe_next_page *)
4404 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4405 nextpg->addr_hi =
4406 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4407 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4408 nextpg->addr_lo =
4409 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4410 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4411 }
4412}
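
/* The loop above turns the last CQE of every RCQ page into a "next page"
 * pointer, chaining the pages into a ring that wraps back to the first
 * page (the 'i % NUM_RCQ_RINGS' term).
 */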
4413
b3b83c3f
DK
4414static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4415{
4416 union host_hc_status_block *sb;
4417 struct bnx2x_fastpath *fp = &bp->fp[index];
4418 int ring_size = 0;
6383c0b3 4419 u8 cos;
c2188952 4420 int rx_ring_size = 0;
b3b83c3f 4421
2e98ffc2 4422 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
614c76df
DK
4423 rx_ring_size = MIN_RX_SIZE_NONTPA;
4424 bp->rx_ring_size = rx_ring_size;
55c11941 4425 } else if (!bp->rx_ring_size) {
c2188952
VZ
4426 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4427
065f8b92
YM
4428 if (CHIP_IS_E3(bp)) {
4429 u32 cfg = SHMEM_RD(bp,
4430 dev_info.port_hw_config[BP_PORT(bp)].
4431 default_cfg);
4432
4433 /* Decrease ring size for 1G functions */
4434 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4435 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4436 rx_ring_size /= 10;
4437 }
d760fc37 4438
c2188952
VZ
 4439 /* allocate at least the number of buffers required by FW */
4440 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4441 MIN_RX_SIZE_TPA, rx_ring_size);
4442
4443 bp->rx_ring_size = rx_ring_size;
614c76df 4444 } else /* if rx_ring_size specified - use it */
c2188952 4445 rx_ring_size = bp->rx_ring_size;
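
	/* At this point rx_ring_size is either the user-specified value,
	 * the MIN_RX_SIZE_NONTPA floor for storage-only functions, or
	 * MAX_RX_AVAIL spread across the Rx queues (scaled down for 1G E3
	 * SerDes ports) and clamped to the FW minimum.
	 */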
b3b83c3f 4446
04c46736
YM
4447 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4448
b3b83c3f
DK
4449 /* Common */
4450 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4451
b3b83c3f 4452 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4453 /* status blocks */
cd2b0389
JP
4454 if (!CHIP_IS_E1x(bp)) {
4455 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4456 sizeof(struct host_hc_status_block_e2));
4457 if (!sb->e2_sb)
4458 goto alloc_mem_err;
4459 } else {
4460 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4461 sizeof(struct host_hc_status_block_e1x));
4462 if (!sb->e1x_sb)
4463 goto alloc_mem_err;
4464 }
b3b83c3f 4465 }
8eef2af1
DK
4466
4467 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4468 * set shortcuts for it.
4469 */
4470 if (!IS_FCOE_IDX(index))
4471 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4472
4473 /* Tx */
4474 if (!skip_tx_queue(bp, index)) {
4475 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4476 for_each_cos_in_tx_queue(fp, cos) {
65565884 4477 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4478
51c1a580
MS
4479 DP(NETIF_MSG_IFUP,
4480 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4481 index, cos);
4482
cd2b0389
JP
4483 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4484 sizeof(struct sw_tx_bd),
4485 GFP_KERNEL);
4486 if (!txdata->tx_buf_ring)
4487 goto alloc_mem_err;
4488 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4489 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4490 if (!txdata->tx_desc_ring)
4491 goto alloc_mem_err;
6383c0b3 4492 }
b3b83c3f
DK
4493 }
4494
4495 /* Rx */
4496 if (!skip_rx_queue(bp, index)) {
4497 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4498 bnx2x_fp(bp, index, rx_buf_ring) =
4499 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4500 if (!bnx2x_fp(bp, index, rx_buf_ring))
4501 goto alloc_mem_err;
4502 bnx2x_fp(bp, index, rx_desc_ring) =
4503 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4504 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4505 if (!bnx2x_fp(bp, index, rx_desc_ring))
4506 goto alloc_mem_err;
b3b83c3f 4507
75b29459 4508 /* Seed all CQEs by 1s */
cd2b0389
JP
4509 bnx2x_fp(bp, index, rx_comp_ring) =
4510 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4511 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4512 if (!bnx2x_fp(bp, index, rx_comp_ring))
4513 goto alloc_mem_err;
b3b83c3f
DK
4514
4515 /* SGE ring */
cd2b0389
JP
4516 bnx2x_fp(bp, index, rx_page_ring) =
4517 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4518 GFP_KERNEL);
4519 if (!bnx2x_fp(bp, index, rx_page_ring))
4520 goto alloc_mem_err;
4521 bnx2x_fp(bp, index, rx_sge_ring) =
4522 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4523 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4524 if (!bnx2x_fp(bp, index, rx_sge_ring))
4525 goto alloc_mem_err;
b3b83c3f
DK
4526 /* RX BD ring */
4527 bnx2x_set_next_page_rx_bd(fp);
4528
4529 /* CQ ring */
4530 bnx2x_set_next_page_rx_cq(fp);
4531
4532 /* BDs */
4533 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4534 if (ring_size < rx_ring_size)
4535 goto alloc_mem_err;
4536 }
4537
4538 return 0;
4539
4540/* handles low memory cases */
4541alloc_mem_err:
4542 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4543 index, ring_size);
 4544 /* FW will drop all packets if the queue is not big enough.
 4545 * In these cases we disable the queue
6383c0b3 4546 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f 4547 */
7e6b4d44 4548 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
eb722d7a 4549 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4550 /* release memory allocated for this queue */
4551 bnx2x_free_fp_mem_at(bp, index);
4552 return -ENOMEM;
4553 }
4554 return 0;
4555}
4556
a8f47eb7 4557static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4558{
4559 if (!NO_FCOE(bp))
4560 /* FCoE */
4561 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4562 /* we will fail the load process instead of marking
4563 * NO_FCOE_FLAG
4564 */
4565 return -ENOMEM;
4566
4567 return 0;
4568}
4569
a8f47eb7 4570static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4571{
4572 int i;
4573
55c11941
MS
4574 /* 1. Allocate FP for leading - fatal if error
4575 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4576 */
4577
4578 /* leading */
4579 if (bnx2x_alloc_fp_mem_at(bp, 0))
4580 return -ENOMEM;
6383c0b3 4581
b3b83c3f
DK
4582 /* RSS */
4583 for_each_nondefault_eth_queue(bp, i)
4584 if (bnx2x_alloc_fp_mem_at(bp, i))
4585 break;
4586
4587 /* handle memory failures */
4588 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4589 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4590
4591 WARN_ON(delta < 0);
4864a16a 4592 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4593 if (CNIC_SUPPORT(bp))
4594 /* move non eth FPs next to last eth FP
4595 * must be done in that order
4596 * FCOE_IDX < FWD_IDX < OOO_IDX
4597 */
b3b83c3f 4598
55c11941
MS
 4599 /* move FCoE fp even if NO_FCOE_FLAG is on */
4600 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4601 bp->num_ethernet_queues -= delta;
4602 bp->num_queues = bp->num_ethernet_queues +
4603 bp->num_cnic_queues;
b3b83c3f
DK
4604 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4605 bp->num_queues + delta, bp->num_queues);
4606 }
4607
4608 return 0;
4609}
d6214d7a 4610
523224a3
DK
4611void bnx2x_free_mem_bp(struct bnx2x *bp)
4612{
c3146eb6
DK
4613 int i;
4614
4615 for (i = 0; i < bp->fp_array_size; i++)
4616 kfree(bp->fp[i].tpa_info);
523224a3 4617 kfree(bp->fp);
15192a8c
BW
4618 kfree(bp->sp_objs);
4619 kfree(bp->fp_stats);
65565884 4620 kfree(bp->bnx2x_txq);
523224a3
DK
4621 kfree(bp->msix_table);
4622 kfree(bp->ilt);
4623}
4624
0329aba1 4625int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4626{
4627 struct bnx2x_fastpath *fp;
4628 struct msix_entry *tbl;
4629 struct bnx2x_ilt *ilt;
6383c0b3 4630 int msix_table_size = 0;
55c11941 4631 int fp_array_size, txq_array_size;
15192a8c 4632 int i;
6383c0b3
AE
4633
4634 /*
4635 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4636 * path IGU SBs plus default SB (for PF only).
6383c0b3 4637 */
1ab4434c
AE
4638 msix_table_size = bp->igu_sb_cnt;
4639 if (IS_PF(bp))
4640 msix_table_size++;
4641 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4642
6383c0b3 4643 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4644 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4645 bp->fp_array_size = fp_array_size;
4646 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4647
c3146eb6 4648 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4649 if (!fp)
4650 goto alloc_err;
c3146eb6 4651 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4652 fp[i].tpa_info =
4653 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4654 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4655 if (!(fp[i].tpa_info))
4656 goto alloc_err;
4657 }
4658
523224a3
DK
4659 bp->fp = fp;
4660
15192a8c 4661 /* allocate sp objs */
c3146eb6 4662 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4663 GFP_KERNEL);
4664 if (!bp->sp_objs)
4665 goto alloc_err;
4666
4667 /* allocate fp_stats */
c3146eb6 4668 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4669 GFP_KERNEL);
4670 if (!bp->fp_stats)
4671 goto alloc_err;
4672
65565884 4673 /* Allocate memory for the transmission queues array */
55c11941
MS
4674 txq_array_size =
4675 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4676 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4677
4678 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4679 GFP_KERNEL);
65565884
MS
4680 if (!bp->bnx2x_txq)
4681 goto alloc_err;
4682
523224a3 4683 /* msix table */
01e23742 4684 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4685 if (!tbl)
4686 goto alloc_err;
4687 bp->msix_table = tbl;
4688
4689 /* ilt */
4690 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4691 if (!ilt)
4692 goto alloc_err;
4693 bp->ilt = ilt;
4694
4695 return 0;
4696alloc_err:
4697 bnx2x_free_mem_bp(bp);
4698 return -ENOMEM;
523224a3
DK
4699}
4700
a9fccec7 4701int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4702{
4703 struct bnx2x *bp = netdev_priv(dev);
4704
4705 if (unlikely(!netif_running(dev)))
4706 return 0;
4707
5d07d868 4708 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4709 return bnx2x_nic_load(bp, LOAD_NORMAL);
4710}
4711
1ac9e428
YR
4712int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4713{
4714 u32 sel_phy_idx = 0;
4715 if (bp->link_params.num_phys <= 1)
4716 return INT_PHY;
4717
4718 if (bp->link_vars.link_up) {
4719 sel_phy_idx = EXT_PHY1;
4720 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4721 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4722 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4723 sel_phy_idx = EXT_PHY2;
4724 } else {
4725
4726 switch (bnx2x_phy_selection(&bp->link_params)) {
4727 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4728 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4729 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4730 sel_phy_idx = EXT_PHY1;
4731 break;
4732 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4733 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4734 sel_phy_idx = EXT_PHY2;
4735 break;
4736 }
4737 }
4738
4739 return sel_phy_idx;
1ac9e428
YR
4740}
4741int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4742{
4743 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4744 /*
2de67439 4745 * The selected activated PHY is always the one after swapping (in case PHY
1ac9e428
YR
4746 * swapping is enabled). So when swapping is enabled, we need to reverse
4747 * the configuration
4748 */
4749
4750 if (bp->link_params.multi_phy_config &
4751 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4752 if (sel_phy_idx == EXT_PHY1)
4753 sel_phy_idx = EXT_PHY2;
4754 else if (sel_phy_idx == EXT_PHY2)
4755 sel_phy_idx = EXT_PHY1;
4756 }
4757 return LINK_CONFIG_IDX(sel_phy_idx);
4758}
4759
55c11941 4760#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4761int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4762{
4763 struct bnx2x *bp = netdev_priv(dev);
4764 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4765
4766 switch (type) {
4767 case NETDEV_FCOE_WWNN:
4768 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4769 cp->fcoe_wwn_node_name_lo);
4770 break;
4771 case NETDEV_FCOE_WWPN:
4772 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4773 cp->fcoe_wwn_port_name_lo);
4774 break;
4775 default:
51c1a580 4776 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4777 return -EINVAL;
4778 }
4779
4780 return 0;
4781}
4782#endif
4783
9f6c9258
DK
4784/* called with rtnl_lock */
4785int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4786{
4787 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4788
4789 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4790 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4791 return -EAGAIN;
4792 }
4793
4794 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4795 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4796 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4797 return -EINVAL;
51c1a580 4798 }
9f6c9258
DK
4799
4800 /* This does not race with packet allocation
4801 * because the actual alloc size is
4802 * only updated as part of load
4803 */
4804 dev->mtu = new_mtu;
4805
66371c44
MM
4806 return bnx2x_reload_if_running(dev);
4807}
4808
c8f44aff 4809netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4810 netdev_features_t features)
66371c44
MM
4811{
4812 struct bnx2x *bp = netdev_priv(dev);
4813
909d9faa
YM
4814 if (pci_num_vf(bp->pdev)) {
4815 netdev_features_t changed = dev->features ^ features;
4816
4817 /* Revert the requested changes in features if they
4818 * would require internal reload of PF in bnx2x_set_features().
4819 */
4820 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4821 features &= ~NETIF_F_RXCSUM;
4822 features |= dev->features & NETIF_F_RXCSUM;
4823 }
4824
4825 if (changed & NETIF_F_LOOPBACK) {
4826 features &= ~NETIF_F_LOOPBACK;
4827 features |= dev->features & NETIF_F_LOOPBACK;
4828 }
4829 }
4830
66371c44 4831 /* TPA requires Rx CSUM offloading */
aebf6244 4832 if (!(features & NETIF_F_RXCSUM)) {
66371c44 4833 features &= ~NETIF_F_LRO;
621b4d66
DK
4834 features &= ~NETIF_F_GRO;
4835 }
66371c44
MM
4836
4837 return features;
4838}
4839
c8f44aff 4840int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4841{
4842 struct bnx2x *bp = netdev_priv(dev);
f8dcb5e3 4843 netdev_features_t changes = features ^ dev->features;
538dd2e3 4844 bool bnx2x_reload = false;
f8dcb5e3 4845 int rc;
621b4d66 4846
909d9faa
YM
4847 /* VFs or non SRIOV PFs should be able to change loopback feature */
4848 if (!pci_num_vf(bp->pdev)) {
4849 if (features & NETIF_F_LOOPBACK) {
4850 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4851 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4852 bnx2x_reload = true;
4853 }
4854 } else {
4855 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4856 bp->link_params.loopback_mode = LOOPBACK_NONE;
4857 bnx2x_reload = true;
4858 }
538dd2e3
MB
4859 }
4860 }
4861
16a5fd92 4862 /* if GRO is changed while LRO is enabled, don't force a reload */
f8dcb5e3
MS
4863 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4864 changes &= ~NETIF_F_GRO;
8802f579 4865
aebf6244 4866 /* if GRO is changed while HW TPA is off, don't force a reload */
f8dcb5e3
MS
4867 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4868 changes &= ~NETIF_F_GRO;
aebf6244 4869
8802f579 4870 if (changes)
538dd2e3 4871 bnx2x_reload = true;
8802f579 4872
538dd2e3 4873 if (bnx2x_reload) {
f8dcb5e3
MS
4874 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4875 dev->features = features;
4876 rc = bnx2x_reload_if_running(dev);
4877 return rc ? rc : 1;
4878 }
66371c44 4879 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4880 }
4881
66371c44 4882 return 0;
9f6c9258
DK
4883}
4884
4885void bnx2x_tx_timeout(struct net_device *dev)
4886{
4887 struct bnx2x *bp = netdev_priv(dev);
4888
4889#ifdef BNX2X_STOP_ON_ERROR
4890 if (!bp->panic)
4891 bnx2x_panic();
4892#endif
7be08a72 4893
9f6c9258 4894 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4895 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4896}
4897
9f6c9258
DK
4898int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4899{
4900 struct net_device *dev = pci_get_drvdata(pdev);
4901 struct bnx2x *bp;
4902
4903 if (!dev) {
4904 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4905 return -ENODEV;
4906 }
4907 bp = netdev_priv(dev);
4908
4909 rtnl_lock();
4910
4911 pci_save_state(pdev);
4912
4913 if (!netif_running(dev)) {
4914 rtnl_unlock();
4915 return 0;
4916 }
4917
4918 netif_device_detach(dev);
4919
5d07d868 4920 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4921
4922 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4923
4924 rtnl_unlock();
4925
4926 return 0;
4927}
4928
4929int bnx2x_resume(struct pci_dev *pdev)
4930{
4931 struct net_device *dev = pci_get_drvdata(pdev);
4932 struct bnx2x *bp;
4933 int rc;
4934
4935 if (!dev) {
4936 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4937 return -ENODEV;
4938 }
4939 bp = netdev_priv(dev);
4940
909d9faa
YM
4941 if (pci_num_vf(bp->pdev)) {
4942 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4943 return -EPERM;
4944 }
4945
9f6c9258 4946 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4947 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4948 return -EAGAIN;
4949 }
4950
4951 rtnl_lock();
4952
4953 pci_restore_state(pdev);
4954
4955 if (!netif_running(dev)) {
4956 rtnl_unlock();
4957 return 0;
4958 }
4959
4960 bnx2x_set_power_state(bp, PCI_D0);
4961 netif_device_attach(dev);
4962
4963 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4964
4965 rtnl_unlock();
4966
4967 return rc;
4968}
619c5cb6 4969
619c5cb6
VZ
4970void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4971 u32 cid)
4972{
b9871bcf
AE
4973 if (!cxt) {
4974 BNX2X_ERR("bad context pointer %p\n", cxt);
4975 return;
4976 }
4977
619c5cb6
VZ
4978 /* ustorm cxt validation */
4979 cxt->ustorm_ag_context.cdu_usage =
4980 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4981 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4982 /* xcontext validation */
4983 cxt->xstorm_ag_context.cdu_reserved =
4984 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4985 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4986}
4987
1191cb83
ED
4988static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4989 u8 fw_sb_id, u8 sb_index,
4990 u8 ticks)
619c5cb6 4991{
619c5cb6
VZ
4992 u32 addr = BAR_CSTRORM_INTMEM +
4993 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4994 REG_WR8(bp, addr, ticks);
51c1a580
MS
4995 DP(NETIF_MSG_IFUP,
4996 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4997 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4998}
4999
1191cb83
ED
5000static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5001 u16 fw_sb_id, u8 sb_index,
5002 u8 disable)
619c5cb6
VZ
5003{
5004 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5005 u32 addr = BAR_CSTRORM_INTMEM +
5006 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 5007 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
5008 /* clear and set */
5009 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5010 flags |= enable_flag;
0c14e5ce 5011 REG_WR8(bp, addr, flags);
51c1a580
MS
5012 DP(NETIF_MSG_IFUP,
5013 "port %x fw_sb_id %d sb_index %d disable %d\n",
5014 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
5015}
5016
5017void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5018 u8 sb_index, u8 disable, u16 usec)
5019{
5020 int port = BP_PORT(bp);
5021 u8 ticks = usec / BNX2X_BTR;
5022
5023 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5024
5025 disable = disable ? 1 : (usec ? 0 : 1);
5026 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5027}
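
/* The coalescing timeout above is programmed in units of BNX2X_BTR
 * microseconds; passing usec == 0 implicitly disables host coalescing for
 * that status-block index even if 'disable' was not set by the caller.
 */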
230bb0f3
YM
5028
5029void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5030 u32 verbose)
5031{
4e857c58 5032 smp_mb__before_atomic();
230bb0f3 5033 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 5034 smp_mb__after_atomic();
230bb0f3
YM
5035 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5036 flag);
5037 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5038}
5039EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);