Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[linux-2.6-block.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_cmn.c
CommitLineData
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
9969085e 24#include <net/tcp.h>
f2e0899f 25#include <net/ipv6.h>
7f3e01fe 26#include <net/ip6_checksum.h>
076bb0c8 27#include <net/busy_poll.h>
c0cba59e 28#include <linux/prefetch.h>
9f6c9258 29#include "bnx2x_cmn.h"
523224a3 30#include "bnx2x_init.h"
042181f5 31#include "bnx2x_sp.h"
9f6c9258 32
b3b83c3f
DK
33/**
34 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
40 * Makes sure the contents of the bp->fp[to].napi is kept
72754080
AE
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
65565884
MS
43 * source onto the target. Update txdata pointers and related
44 * content.
b3b83c3f
DK
45 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
34d5626a 56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
72754080
AE
57
58 /* Copy the NAPI object as it has been already initialized */
59 from_fp->napi = to_fp->napi;
60
b3b83c3f
DK
61 /* Move bnx2x_fastpath contents */
62 memcpy(to_fp, from_fp, sizeof(*to_fp));
63 to_fp->index = to;
65565884 64
34d5626a
YM
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
67 */
68 to_fp->tpa_info = old_tpa_info;
69
15192a8c
BW
70 /* move sp_objs contents as well, as their indices match fp ones */
71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
72
73 /* move fp_stats contents as well, as their indices match fp ones */
74 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
75
65565884
MS
76 /* Update txdata pointers in fp and move txdata content accordingly:
77 * Each fp consumes 'max_cos' txdata structures, so the index should be
78 * decremented by max_cos x delta.
79 */
80
81 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
82 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
83 (bp)->max_cos;
84 if (from == FCOE_IDX(bp)) {
85 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
86 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
87 }
88
4864a16a
YM
89 memcpy(&bp->bnx2x_txq[new_txdata_index],
90 &bp->bnx2x_txq[old_txdata_index],
65565884
MS
91 sizeof(struct bnx2x_fp_txdata));
92 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
93}
94
8ca5e17e
AE
95/**
96 * bnx2x_fill_fw_str - Fill buffer with FW version string.
97 *
98 * @bp: driver handle
99 * @buf: character buffer to fill with the fw name
100 * @buf_len: length of the above buffer
101 *
102 */
103void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
104{
105 if (IS_PF(bp)) {
106 u8 phy_fw_ver[PHY_FW_VER_LEN];
107
108 phy_fw_ver[0] = '\0';
109 bnx2x_get_ext_phy_fw_version(&bp->link_params,
110 phy_fw_ver, PHY_FW_VER_LEN);
111 strlcpy(buf, bp->fw_ver, buf_len);
112 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
113 "bc %d.%d.%d%s%s",
114 (bp->common.bc_ver & 0xff0000) >> 16,
115 (bp->common.bc_ver & 0xff00) >> 8,
116 (bp->common.bc_ver & 0xff),
117 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
118 } else {
6411280a 119 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
8ca5e17e
AE
120 }
121}
122
4864a16a
YM
123/**
124 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
125 *
126 * @bp: driver handle
127 * @delta: number of eth queues which were not allocated
128 */
129static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
130{
131 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
132
133 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
16a5fd92 134 * backward along the array could cause memory to be overridden
4864a16a
YM
135 */
136 for (cos = 1; cos < bp->max_cos; cos++) {
137 for (i = 0; i < old_eth_num - delta; i++) {
138 struct bnx2x_fastpath *fp = &bp->fp[i];
139 int new_idx = cos * (old_eth_num - delta) + i;
140
141 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
142 sizeof(struct bnx2x_fp_txdata));
143 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
144 }
145 }
146}
147
619c5cb6
VZ
148int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
149
9f6c9258
DK
150/* free skb in the packet ring at pos idx
151 * return idx of last bd freed
152 */
6383c0b3 153static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
154 u16 idx, unsigned int *pkts_compl,
155 unsigned int *bytes_compl)
9f6c9258 156{
6383c0b3 157 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
158 struct eth_tx_start_bd *tx_start_bd;
159 struct eth_tx_bd *tx_data_bd;
160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
162 int nbd;
163
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */
165 prefetch(&skb->end);
166
51c1a580 167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 168 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
169
170 /* unmap first bd */
6383c0b3 171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258
DK
174
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176#ifdef BNX2X_STOP_ON_ERROR
177 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
178 BNX2X_ERR("BAD nbd!\n");
179 bnx2x_panic();
180 }
181#endif
182 new_cons = nbd + tx_buf->first_bd;
183
184 /* Get the next bd */
185 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
186
187 /* Skip a parse bd... */
188 --nbd;
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
190
191 /* ...and the TSO split header bd since they have no mapping */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
193 --nbd;
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
195 }
196
197 /* now free frags */
198 while (nbd > 0) {
199
6383c0b3 200 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
201 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
202 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
203 if (--nbd)
204 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 }
206
207 /* release skb */
208 WARN_ON(!skb);
d8290ae5 209 if (likely(skb)) {
2df1a70a
TH
210 (*pkts_compl)++;
211 (*bytes_compl) += skb->len;
212 }
d8290ae5 213
40955532 214 dev_kfree_skb_any(skb);
9f6c9258
DK
215 tx_buf->first_bd = 0;
216 tx_buf->skb = NULL;
217
218 return new_cons;
219}
220
6383c0b3 221int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 222{
9f6c9258 223 struct netdev_queue *txq;
6383c0b3 224 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 225 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
226
227#ifdef BNX2X_STOP_ON_ERROR
228 if (unlikely(bp->panic))
229 return -1;
230#endif
231
6383c0b3
AE
232 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
233 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
234 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
235
236 while (sw_cons != hw_cons) {
237 u16 pkt_cons;
238
239 pkt_cons = TX_BD(sw_cons);
240
51c1a580
MS
241 DP(NETIF_MSG_TX_DONE,
242 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 243 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 244
2df1a70a 245 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 246 &pkts_compl, &bytes_compl);
2df1a70a 247
9f6c9258
DK
248 sw_cons++;
249 }
250
2df1a70a
TH
251 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
252
6383c0b3
AE
253 txdata->tx_pkt_cons = sw_cons;
254 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
255
256 /* Need to make the tx_bd_cons update visible to start_xmit()
257 * before checking for netif_tx_queue_stopped(). Without the
258 * memory barrier, there is a small possibility that
259 * start_xmit() will miss it and cause the queue to be stopped
260 * forever.
619c5cb6
VZ
261 * On the other hand we need an rmb() here to ensure the proper
262 * ordering of bit testing in the following
263 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
264 */
265 smp_mb();
266
9f6c9258 267 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 268 /* Taking tx_lock() is needed to prevent re-enabling the queue
9f6c9258
DK
269 * while it's empty. This could have happen if rx_action() gets
270 * suspended in bnx2x_tx_int() after the condition before
271 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
272 *
273 * stops the queue->sees fresh tx_bd_cons->releases the queue->
274 * sends some packets consuming the whole queue again->
275 * stops the queue
276 */
277
278 __netif_tx_lock(txq, smp_processor_id());
279
280 if ((netif_tx_queue_stopped(txq)) &&
281 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 282 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
283 netif_tx_wake_queue(txq);
284
285 __netif_tx_unlock(txq);
286 }
287 return 0;
288}
289
290static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
291 u16 idx)
292{
293 u16 last_max = fp->last_max_sge;
294
295 if (SUB_S16(idx, last_max) > 0)
296 fp->last_max_sge = idx;
297}
298
621b4d66
DK
299static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
300 u16 sge_len,
301 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
302{
303 struct bnx2x *bp = fp->bp;
9f6c9258
DK
304 u16 last_max, last_elem, first_elem;
305 u16 delta = 0;
306 u16 i;
307
308 if (!sge_len)
309 return;
310
311 /* First mark all used pages */
312 for (i = 0; i < sge_len; i++)
619c5cb6 313 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 314 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
315
316 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 317 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
318
319 /* Here we assume that the last SGE index is the biggest */
320 prefetch((void *)(fp->sge_mask));
523224a3 321 bnx2x_update_last_max_sge(fp,
621b4d66 322 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
323
324 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
325 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
326 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
327
328 /* If ring is not full */
329 if (last_elem + 1 != first_elem)
330 last_elem++;
331
332 /* Now update the prod */
333 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
334 if (likely(fp->sge_mask[i]))
335 break;
336
619c5cb6
VZ
337 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
338 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
339 }
340
341 if (delta > 0) {
342 fp->rx_sge_prod += delta;
343 /* clear page-end entries */
344 bnx2x_clear_sge_mask_next_elems(fp);
345 }
346
347 DP(NETIF_MSG_RX_STATUS,
348 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
349 fp->last_max_sge, fp->rx_sge_prod);
350}
351
2de67439 352/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
353 * CQE (calculated by HW).
354 */
355static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
356 const struct eth_fast_path_rx_cqe *cqe,
357 bool *l4_rxhash)
e52fcb24 358{
2de67439 359 /* Get Toeplitz hash from CQE */
e52fcb24 360 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
361 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
362 enum eth_rss_hash_type htype;
363
364 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
365 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
366 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 367 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
368 }
369 *l4_rxhash = false;
e52fcb24
ED
370 return 0;
371}
372
9f6c9258 373static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 374 u16 cons, u16 prod,
619c5cb6 375 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
376{
377 struct bnx2x *bp = fp->bp;
378 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
379 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
380 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
381 dma_addr_t mapping;
619c5cb6
VZ
382 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
383 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 384
619c5cb6
VZ
385 /* print error if current state != stop */
386 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
387 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
388
e52fcb24 389 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 390 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 391 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
392 fp->rx_buf_size, DMA_FROM_DEVICE);
393 /*
394 * ...if it fails - move the skb from the consumer to the producer
395 * and set the current aggregation state as ERROR to drop it
396 * when TPA_STOP arrives.
397 */
398
399 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
400 /* Move the BD from the consumer to the producer */
e52fcb24 401 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
402 tpa_info->tpa_state = BNX2X_TPA_ERROR;
403 return;
404 }
9f6c9258 405
e52fcb24
ED
406 /* move empty data from pool to prod */
407 prod_rx_buf->data = first_buf->data;
619c5cb6 408 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 409 /* point prod_bd to new data */
9f6c9258
DK
410 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
411 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
412
619c5cb6
VZ
413 /* move partial skb from cons to pool (don't unmap yet) */
414 *first_buf = *cons_rx_buf;
415
416 /* mark bin state as START */
417 tpa_info->parsing_flags =
418 le16_to_cpu(cqe->pars_flags.flags);
419 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
420 tpa_info->tpa_state = BNX2X_TPA_START;
421 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
422 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 423 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
424 if (fp->mode == TPA_MODE_GRO) {
425 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 426 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
427 tpa_info->gro_size = gro_size;
428 }
619c5cb6 429
9f6c9258
DK
430#ifdef BNX2X_STOP_ON_ERROR
431 fp->tpa_queue_used |= (1 << queue);
432#ifdef _ASM_GENERIC_INT_L64_H
433 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
434#else
435 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
436#endif
437 fp->tpa_queue_used);
438#endif
439}
440
e4e3c02a
VZ
441/* Timestamp option length allowed for TPA aggregation:
442 *
443 * nop nop kind length echo val
444 */
445#define TPA_TSTAMP_OPT_LEN 12
446/**
cbf1de72 447 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 448 *
cbf1de72 449 * @skb: packet skb
e8920674
DK
450 * @parsing_flags: parsing flags from the START CQE
451 * @len_on_bd: total length of the first packet for the
452 * aggregation.
cbf1de72 453 * @pkt_len: length of all segments
e8920674
DK
454 *
455 * Approximate value of the MSS for this aggregation calculated using
456 * the first packet of it.
2de67439 457 * Compute number of aggregated segments, and gso_type.
e4e3c02a 458 */
cbf1de72 459static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
460 u16 len_on_bd, unsigned int pkt_len,
461 u16 num_of_coalesced_segs)
e4e3c02a 462{
cbf1de72 463 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 464 * other than timestamp or IPv6 extension headers.
e4e3c02a 465 */
619c5cb6
VZ
466 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
467
468 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 469 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 470 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
471 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
472 } else {
619c5cb6 473 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
474 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
475 }
e4e3c02a
VZ
476
477 /* Check if there was a TCP timestamp, if there is it's will
478 * always be 12 bytes length: nop nop kind length echo val.
479 *
480 * Otherwise FW would close the aggregation.
481 */
482 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
483 hdrs_len += TPA_TSTAMP_OPT_LEN;
484
cbf1de72
YM
485 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
486
487 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
488 * to skb_shinfo(skb)->gso_segs
489 */
ab5777d7 490 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
491}
492
996dedba
MS
493static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 u16 index, gfp_t gfp_mask)
1191cb83 495{
996dedba 496 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
1191cb83
ED
497 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
498 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
499 dma_addr_t mapping;
500
501 if (unlikely(page == NULL)) {
502 BNX2X_ERR("Can't alloc sge\n");
503 return -ENOMEM;
504 }
505
506 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 507 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
508 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
509 __free_pages(page, PAGES_PER_SGE_SHIFT);
510 BNX2X_ERR("Can't map sge\n");
511 return -ENOMEM;
512 }
513
514 sw_buf->page = page;
515 dma_unmap_addr_set(sw_buf, mapping, mapping);
516
517 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
518 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
519
520 return 0;
521}
522
9f6c9258 523static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
524 struct bnx2x_agg_info *tpa_info,
525 u16 pages,
526 struct sk_buff *skb,
619c5cb6
VZ
527 struct eth_end_agg_rx_cqe *cqe,
528 u16 cqe_idx)
9f6c9258
DK
529{
530 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
531 u32 i, frag_len, frag_size;
532 int err, j, frag_id = 0;
619c5cb6 533 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 534 u16 full_page = 0, gro_size = 0;
9f6c9258 535
619c5cb6 536 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
537
538 if (fp->mode == TPA_MODE_GRO) {
539 gro_size = tpa_info->gro_size;
540 full_page = tpa_info->full_page;
541 }
9f6c9258
DK
542
543 /* This is needed in order to enable forwarding support */
cbf1de72
YM
544 if (frag_size)
545 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
546 le16_to_cpu(cqe->pkt_len),
547 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 548
9f6c9258 549#ifdef BNX2X_STOP_ON_ERROR
924d75ab 550 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
551 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
552 pages, cqe_idx);
619c5cb6 553 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
554 bnx2x_panic();
555 return -EINVAL;
556 }
557#endif
558
559 /* Run through the SGL and compose the fragmented skb */
560 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 561 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
562
563 /* FW gives the indices of the SGE as if the ring is an array
564 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
565 if (fp->mode == TPA_MODE_GRO)
566 frag_len = min_t(u32, frag_size, (u32)full_page);
567 else /* LRO */
924d75ab 568 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 569
9f6c9258
DK
570 rx_pg = &fp->rx_page_ring[sge_idx];
571 old_rx_pg = *rx_pg;
572
573 /* If we fail to allocate a substitute page, we simply stop
574 where we are and drop the whole packet */
996dedba 575 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 576 if (unlikely(err)) {
15192a8c 577 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
578 return err;
579 }
580
16a5fd92 581 /* Unmap the page as we're going to pass it to the stack */
9f6c9258
DK
582 dma_unmap_page(&bp->pdev->dev,
583 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 584 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 585 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
586 if (fp->mode == TPA_MODE_LRO)
587 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
588 else { /* GRO */
589 int rem;
590 int offset = 0;
591 for (rem = frag_len; rem > 0; rem -= gro_size) {
592 int len = rem > gro_size ? gro_size : rem;
593 skb_fill_page_desc(skb, frag_id++,
594 old_rx_pg.page, offset, len);
595 if (offset)
596 get_page(old_rx_pg.page);
597 offset += len;
598 }
599 }
9f6c9258
DK
600
601 skb->data_len += frag_len;
924d75ab 602 skb->truesize += SGE_PAGES;
9f6c9258
DK
603 skb->len += frag_len;
604
605 frag_size -= frag_len;
606 }
607
608 return 0;
609}
610
d46d132c
ED
611static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
612{
613 if (fp->rx_frag_size)
614 put_page(virt_to_head_page(data));
615 else
616 kfree(data);
617}
618
996dedba 619static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 620{
996dedba
MS
621 if (fp->rx_frag_size) {
622 /* GFP_KERNEL allocations are used only during initialization */
623 if (unlikely(gfp_mask & __GFP_WAIT))
624 return (void *)__get_free_page(gfp_mask);
625
d46d132c 626 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 627 }
d46d132c 628
996dedba 629 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
d46d132c
ED
630}
631
9969085e
YM
632#ifdef CONFIG_INET
633static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
634{
635 const struct iphdr *iph = ip_hdr(skb);
636 struct tcphdr *th;
637
638 skb_set_transport_header(skb, sizeof(struct iphdr));
639 th = tcp_hdr(skb);
640
641 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
642 iph->saddr, iph->daddr, 0);
643}
644
645static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
646{
647 struct ipv6hdr *iph = ipv6_hdr(skb);
648 struct tcphdr *th;
649
650 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
651 th = tcp_hdr(skb);
652
653 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
654 &iph->saddr, &iph->daddr, 0);
655}
2c2d06d5
YM
656
657static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
658 void (*gro_func)(struct bnx2x*, struct sk_buff*))
659{
660 skb_set_network_header(skb, 0);
661 gro_func(bp, skb);
662 tcp_gro_complete(skb);
663}
9969085e
YM
664#endif
665
666static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
667 struct sk_buff *skb)
668{
669#ifdef CONFIG_INET
cbf1de72 670 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
671 switch (be16_to_cpu(skb->protocol)) {
672 case ETH_P_IP:
2c2d06d5 673 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
674 break;
675 case ETH_P_IPV6:
2c2d06d5 676 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
677 break;
678 default:
2c2d06d5 679 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
680 be16_to_cpu(skb->protocol));
681 }
9969085e
YM
682 }
683#endif
60e66fee 684 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
685 napi_gro_receive(&fp->napi, skb);
686}
687
1191cb83
ED
688static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
689 struct bnx2x_agg_info *tpa_info,
690 u16 pages,
691 struct eth_end_agg_rx_cqe *cqe,
692 u16 cqe_idx)
9f6c9258 693{
619c5cb6 694 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 695 u8 pad = tpa_info->placement_offset;
619c5cb6 696 u16 len = tpa_info->len_on_bd;
e52fcb24 697 struct sk_buff *skb = NULL;
621b4d66 698 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
699 u8 old_tpa_state = tpa_info->tpa_state;
700
701 tpa_info->tpa_state = BNX2X_TPA_STOP;
702
703 /* If we there was an error during the handling of the TPA_START -
704 * drop this aggregation.
705 */
706 if (old_tpa_state == BNX2X_TPA_ERROR)
707 goto drop;
708
e52fcb24 709 /* Try to allocate the new data */
996dedba 710 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
711 /* Unmap skb in the pool anyway, as we are going to change
712 pool entry status to BNX2X_TPA_STOP even if new skb allocation
713 fails. */
714 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 715 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 716 if (likely(new_data))
d46d132c 717 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 718
e52fcb24 719 if (likely(skb)) {
9f6c9258 720#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 721 if (pad + len > fp->rx_buf_size) {
51c1a580 722 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 723 pad, len, fp->rx_buf_size);
9f6c9258
DK
724 bnx2x_panic();
725 return;
726 }
727#endif
728
e52fcb24 729 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 730 skb_put(skb, len);
e52fcb24 731 skb->rxhash = tpa_info->rxhash;
a334b5fb 732 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
733
734 skb->protocol = eth_type_trans(skb, bp->dev);
735 skb->ip_summed = CHECKSUM_UNNECESSARY;
736
621b4d66
DK
737 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
738 skb, cqe, cqe_idx)) {
619c5cb6 739 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 740 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 741 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 742 } else {
51c1a580
MS
743 DP(NETIF_MSG_RX_STATUS,
744 "Failed to allocate new pages - dropping packet!\n");
40955532 745 dev_kfree_skb_any(skb);
9f6c9258
DK
746 }
747
e52fcb24
ED
748 /* put new data in bin */
749 rx_buf->data = new_data;
9f6c9258 750
619c5cb6 751 return;
9f6c9258 752 }
d46d132c 753 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
754drop:
755 /* drop the packet and keep the buffer in the bin */
756 DP(NETIF_MSG_RX_STATUS,
757 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 758 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
759}
760
996dedba
MS
761static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
762 u16 index, gfp_t gfp_mask)
1191cb83
ED
763{
764 u8 *data;
765 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
766 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
767 dma_addr_t mapping;
768
996dedba 769 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
770 if (unlikely(data == NULL))
771 return -ENOMEM;
772
773 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
774 fp->rx_buf_size,
775 DMA_FROM_DEVICE);
776 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 777 bnx2x_frag_free(fp, data);
1191cb83
ED
778 BNX2X_ERR("Can't map rx data\n");
779 return -ENOMEM;
780 }
781
782 rx_buf->data = data;
783 dma_unmap_addr_set(rx_buf, mapping, mapping);
784
785 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
786 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
787
788 return 0;
789}
790
15192a8c
BW
791static
792void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
793 struct bnx2x_fastpath *fp,
794 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 795{
e488921f
MS
796 /* Do nothing if no L4 csum validation was done.
797 * We do not check whether IP csum was validated. For IPv4 we assume
798 * that if the card got as far as validating the L4 csum, it also
799 * validated the IP csum. IPv6 has no IP csum.
800 */
d6cb3e41 801 if (cqe->fast_path_cqe.status_flags &
e488921f 802 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
803 return;
804
e488921f 805 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
806
807 if (cqe->fast_path_cqe.type_error_flags &
808 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
809 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 810 qstats->hw_csum_err++;
d6cb3e41
ED
811 else
812 skb->ip_summed = CHECKSUM_UNNECESSARY;
813}
9f6c9258
DK
814
815int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
816{
817 struct bnx2x *bp = fp->bp;
818 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 819 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 820 int rx_pkt = 0;
75b29459
DK
821 union eth_rx_cqe *cqe;
822 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
823
824#ifdef BNX2X_STOP_ON_ERROR
825 if (unlikely(bp->panic))
826 return 0;
827#endif
828
9f6c9258
DK
829 bd_cons = fp->rx_bd_cons;
830 bd_prod = fp->rx_bd_prod;
831 bd_prod_fw = bd_prod;
832 sw_comp_cons = fp->rx_comp_cons;
833 sw_comp_prod = fp->rx_comp_prod;
834
75b29459
DK
835 comp_ring_cons = RCQ_BD(sw_comp_cons);
836 cqe = &fp->rx_comp_ring[comp_ring_cons];
837 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
838
839 DP(NETIF_MSG_RX_STATUS,
75b29459 840 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 841
75b29459 842 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
843 struct sw_rx_bd *rx_buf = NULL;
844 struct sk_buff *skb;
9f6c9258 845 u8 cqe_fp_flags;
619c5cb6 846 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 847 u16 len, pad, queue;
e52fcb24 848 u8 *data;
a334b5fb 849 bool l4_rxhash;
9f6c9258 850
619c5cb6
VZ
851#ifdef BNX2X_STOP_ON_ERROR
852 if (unlikely(bp->panic))
853 return 0;
854#endif
855
9f6c9258
DK
856 bd_prod = RX_BD(bd_prod);
857 bd_cons = RX_BD(bd_cons);
858
619c5cb6
VZ
859 cqe_fp_flags = cqe_fp->type_error_flags;
860 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 861
51c1a580
MS
862 DP(NETIF_MSG_RX_STATUS,
863 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
864 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
865 cqe_fp_flags, cqe_fp->status_flags,
866 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
867 le16_to_cpu(cqe_fp->vlan_tag),
868 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
869
870 /* is this a slowpath msg? */
619c5cb6 871 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
872 bnx2x_sp_event(fp, cqe);
873 goto next_cqe;
e52fcb24 874 }
621b4d66 875
e52fcb24
ED
876 rx_buf = &fp->rx_buf_ring[bd_cons];
877 data = rx_buf->data;
9f6c9258 878
e52fcb24 879 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
880 struct bnx2x_agg_info *tpa_info;
881 u16 frag_size, pages;
619c5cb6 882#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
883 /* sanity check */
884 if (fp->disable_tpa &&
885 (CQE_TYPE_START(cqe_fp_type) ||
886 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 887 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 888 CQE_TYPE(cqe_fp_type));
619c5cb6 889#endif
9f6c9258 890
e52fcb24
ED
891 if (CQE_TYPE_START(cqe_fp_type)) {
892 u16 queue = cqe_fp->queue_index;
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_start on queue %d\n",
895 queue);
9f6c9258 896
e52fcb24
ED
897 bnx2x_tpa_start(fp, queue,
898 bd_cons, bd_prod,
899 cqe_fp);
621b4d66 900
e52fcb24 901 goto next_rx;
621b4d66
DK
902 }
903 queue = cqe->end_agg_cqe.queue_index;
904 tpa_info = &fp->tpa_info[queue];
905 DP(NETIF_MSG_RX_STATUS,
906 "calling tpa_stop on queue %d\n",
907 queue);
908
909 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
910 tpa_info->len_on_bd;
911
912 if (fp->mode == TPA_MODE_GRO)
913 pages = (frag_size + tpa_info->full_page - 1) /
914 tpa_info->full_page;
915 else
916 pages = SGE_PAGE_ALIGN(frag_size) >>
917 SGE_PAGE_SHIFT;
918
919 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
920 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 921#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
922 if (bp->panic)
923 return 0;
9f6c9258
DK
924#endif
925
621b4d66
DK
926 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
927 goto next_cqe;
e52fcb24
ED
928 }
929 /* non TPA */
621b4d66 930 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
931 pad = cqe_fp->placement_offset;
932 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 933 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
934 pad + RX_COPY_THRESH,
935 DMA_FROM_DEVICE);
936 pad += NET_SKB_PAD;
937 prefetch(data + pad); /* speedup eth_type_trans() */
938 /* is this an error packet? */
939 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 940 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
941 "ERROR flags %x rx packet %u\n",
942 cqe_fp_flags, sw_comp_cons);
15192a8c 943 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
944 goto reuse_rx;
945 }
9f6c9258 946
e52fcb24
ED
947 /* Since we don't have a jumbo ring
948 * copy small packets if mtu > 1500
949 */
950 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
951 (len <= RX_COPY_THRESH)) {
952 skb = netdev_alloc_skb_ip_align(bp->dev, len);
953 if (skb == NULL) {
51c1a580 954 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 955 "ERROR packet dropped because of alloc failure\n");
15192a8c 956 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
957 goto reuse_rx;
958 }
e52fcb24
ED
959 memcpy(skb->data, data + pad, len);
960 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
961 } else {
996dedba
MS
962 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
963 GFP_ATOMIC) == 0)) {
9f6c9258 964 dma_unmap_single(&bp->pdev->dev,
e52fcb24 965 dma_unmap_addr(rx_buf, mapping),
a8c94b91 966 fp->rx_buf_size,
9f6c9258 967 DMA_FROM_DEVICE);
d46d132c 968 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 969 if (unlikely(!skb)) {
d46d132c 970 bnx2x_frag_free(fp, data);
15192a8c
BW
971 bnx2x_fp_qstats(bp, fp)->
972 rx_skb_alloc_failed++;
e52fcb24
ED
973 goto next_rx;
974 }
9f6c9258 975 skb_reserve(skb, pad);
9f6c9258 976 } else {
51c1a580
MS
977 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
978 "ERROR packet dropped because of alloc failure\n");
15192a8c 979 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 980reuse_rx:
e52fcb24 981 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
982 goto next_rx;
983 }
036d2df9 984 }
9f6c9258 985
036d2df9
DK
986 skb_put(skb, len);
987 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 988
036d2df9 989 /* Set Toeplitz hash for a none-LRO skb */
a334b5fb
ED
990 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
991 skb->l4_rxhash = l4_rxhash;
9f6c9258 992
036d2df9 993 skb_checksum_none_assert(skb);
f85582f8 994
d6cb3e41 995 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
996 bnx2x_csum_validate(skb, cqe, fp,
997 bnx2x_fp_qstats(bp, fp));
9f6c9258 998
f233cafe 999 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1000
619c5cb6
VZ
1001 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1002 PARSING_FLAGS_VLAN)
86a9bad3 1003 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1004 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1005
8b80cda5 1006 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1007
1008 if (bnx2x_fp_ll_polling(fp))
1009 netif_receive_skb(skb);
1010 else
1011 napi_gro_receive(&fp->napi, skb);
9f6c9258 1012next_rx:
e52fcb24 1013 rx_buf->data = NULL;
9f6c9258
DK
1014
1015 bd_cons = NEXT_RX_IDX(bd_cons);
1016 bd_prod = NEXT_RX_IDX(bd_prod);
1017 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1018 rx_pkt++;
1019next_cqe:
1020 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1021 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1022
75b29459
DK
1023 /* mark CQE as free */
1024 BNX2X_SEED_CQE(cqe_fp);
1025
9f6c9258
DK
1026 if (rx_pkt == budget)
1027 break;
75b29459
DK
1028
1029 comp_ring_cons = RCQ_BD(sw_comp_cons);
1030 cqe = &fp->rx_comp_ring[comp_ring_cons];
1031 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1032 } /* while */
1033
1034 fp->rx_bd_cons = bd_cons;
1035 fp->rx_bd_prod = bd_prod_fw;
1036 fp->rx_comp_cons = sw_comp_cons;
1037 fp->rx_comp_prod = sw_comp_prod;
1038
1039 /* Update producers */
1040 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1041 fp->rx_sge_prod);
1042
1043 fp->rx_pkt += rx_pkt;
1044 fp->rx_calls++;
1045
1046 return rx_pkt;
1047}
1048
1049static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1050{
1051 struct bnx2x_fastpath *fp = fp_cookie;
1052 struct bnx2x *bp = fp->bp;
6383c0b3 1053 u8 cos;
9f6c9258 1054
51c1a580
MS
1055 DP(NETIF_MSG_INTR,
1056 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1057 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1058
523224a3 1059 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1060
1061#ifdef BNX2X_STOP_ON_ERROR
1062 if (unlikely(bp->panic))
1063 return IRQ_HANDLED;
1064#endif
1065
1066 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1067 for_each_cos_in_tx_queue(fp, cos)
65565884 1068 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1069
523224a3 1070 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1071 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1072
1073 return IRQ_HANDLED;
1074}
1075
9f6c9258
DK
1076/* HW Lock for shared dual port PHYs */
1077void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1078{
1079 mutex_lock(&bp->port.phy_mutex);
1080
8203c4b6 1081 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1082}
1083
1084void bnx2x_release_phy_lock(struct bnx2x *bp)
1085{
8203c4b6 1086 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1087
1088 mutex_unlock(&bp->port.phy_mutex);
1089}
1090
0793f83f
DK
1091/* calculates MF speed according to current linespeed and MF configuration */
1092u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1093{
1094 u16 line_speed = bp->link_vars.line_speed;
1095 if (IS_MF(bp)) {
faa6fcbb
DK
1096 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1097 bp->mf_config[BP_VN(bp)]);
1098
1099 /* Calculate the current MAX line speed limit for the MF
1100 * devices
0793f83f 1101 */
faa6fcbb
DK
1102 if (IS_MF_SI(bp))
1103 line_speed = (line_speed * maxCfg) / 100;
1104 else { /* SD mode */
0793f83f
DK
1105 u16 vn_max_rate = maxCfg * 100;
1106
1107 if (vn_max_rate < line_speed)
1108 line_speed = vn_max_rate;
faa6fcbb 1109 }
0793f83f
DK
1110 }
1111
1112 return line_speed;
1113}
1114
2ae17f66
VZ
1115/**
1116 * bnx2x_fill_report_data - fill link report data to report
1117 *
1118 * @bp: driver handle
1119 * @data: link state to update
1120 *
1121 * It uses a none-atomic bit operations because is called under the mutex.
1122 */
1191cb83
ED
1123static void bnx2x_fill_report_data(struct bnx2x *bp,
1124 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1125{
1126 u16 line_speed = bnx2x_get_mf_speed(bp);
1127
1128 memset(data, 0, sizeof(*data));
1129
16a5fd92 1130 /* Fill the report data: effective line speed */
2ae17f66
VZ
1131 data->line_speed = line_speed;
1132
1133 /* Link is down */
1134 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1135 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1136 &data->link_report_flags);
1137
1138 /* Full DUPLEX */
1139 if (bp->link_vars.duplex == DUPLEX_FULL)
1140 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1141
1142 /* Rx Flow Control is ON */
1143 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1144 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1145
1146 /* Tx Flow Control is ON */
1147 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1148 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1149}
1150
1151/**
1152 * bnx2x_link_report - report link status to OS.
1153 *
1154 * @bp: driver handle
1155 *
1156 * Calls the __bnx2x_link_report() under the same locking scheme
1157 * as a link/PHY state managing code to ensure a consistent link
1158 * reporting.
1159 */
1160
9f6c9258
DK
1161void bnx2x_link_report(struct bnx2x *bp)
1162{
2ae17f66
VZ
1163 bnx2x_acquire_phy_lock(bp);
1164 __bnx2x_link_report(bp);
1165 bnx2x_release_phy_lock(bp);
1166}
9f6c9258 1167
2ae17f66
VZ
1168/**
1169 * __bnx2x_link_report - report link status to OS.
1170 *
1171 * @bp: driver handle
1172 *
16a5fd92 1173 * None atomic implementation.
2ae17f66
VZ
1174 * Should be called under the phy_lock.
1175 */
1176void __bnx2x_link_report(struct bnx2x *bp)
1177{
1178 struct bnx2x_link_report_data cur_data;
9f6c9258 1179
2ae17f66 1180 /* reread mf_cfg */
ad5afc89 1181 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1182 bnx2x_read_mf_cfg(bp);
1183
1184 /* Read the current link report info */
1185 bnx2x_fill_report_data(bp, &cur_data);
1186
1187 /* Don't report link down or exactly the same link status twice */
1188 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1189 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1190 &bp->last_reported_link.link_report_flags) &&
1191 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1192 &cur_data.link_report_flags)))
1193 return;
1194
1195 bp->link_cnt++;
9f6c9258 1196
2ae17f66
VZ
1197 /* We are going to report a new link parameters now -
1198 * remember the current data for the next time.
1199 */
1200 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1201
2ae17f66
VZ
1202 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1203 &cur_data.link_report_flags)) {
1204 netif_carrier_off(bp->dev);
1205 netdev_err(bp->dev, "NIC Link is Down\n");
1206 return;
1207 } else {
94f05b0f
JP
1208 const char *duplex;
1209 const char *flow;
1210
2ae17f66 1211 netif_carrier_on(bp->dev);
9f6c9258 1212
2ae17f66
VZ
1213 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1214 &cur_data.link_report_flags))
94f05b0f 1215 duplex = "full";
9f6c9258 1216 else
94f05b0f 1217 duplex = "half";
9f6c9258 1218
2ae17f66
VZ
1219 /* Handle the FC at the end so that only these flags would be
1220 * possibly set. This way we may easily check if there is no FC
1221 * enabled.
1222 */
1223 if (cur_data.link_report_flags) {
1224 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1225 &cur_data.link_report_flags)) {
2ae17f66
VZ
1226 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1227 &cur_data.link_report_flags))
94f05b0f
JP
1228 flow = "ON - receive & transmit";
1229 else
1230 flow = "ON - receive";
9f6c9258 1231 } else {
94f05b0f 1232 flow = "ON - transmit";
9f6c9258 1233 }
94f05b0f
JP
1234 } else {
1235 flow = "none";
9f6c9258 1236 }
94f05b0f
JP
1237 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1238 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1239 }
1240}
1241
1191cb83
ED
1242static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1243{
1244 int i;
1245
1246 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1247 struct eth_rx_sge *sge;
1248
1249 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1250 sge->addr_hi =
1251 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1252 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1253
1254 sge->addr_lo =
1255 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1256 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1257 }
1258}
1259
1260static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1261 struct bnx2x_fastpath *fp, int last)
1262{
1263 int i;
1264
1265 for (i = 0; i < last; i++) {
1266 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1267 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1268 u8 *data = first_buf->data;
1269
1270 if (data == NULL) {
1271 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1272 continue;
1273 }
1274 if (tpa_info->tpa_state == BNX2X_TPA_START)
1275 dma_unmap_single(&bp->pdev->dev,
1276 dma_unmap_addr(first_buf, mapping),
1277 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1278 bnx2x_frag_free(fp, data);
1191cb83
ED
1279 first_buf->data = NULL;
1280 }
1281}
1282
55c11941
MS
1283void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1284{
1285 int j;
1286
1287 for_each_rx_queue_cnic(bp, j) {
1288 struct bnx2x_fastpath *fp = &bp->fp[j];
1289
1290 fp->rx_bd_cons = 0;
1291
1292 /* Activate BD ring */
1293 /* Warning!
1294 * this will generate an interrupt (to the TSTORM)
1295 * must only be done after chip is initialized
1296 */
1297 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1298 fp->rx_sge_prod);
1299 }
1300}
1301
9f6c9258
DK
1302void bnx2x_init_rx_rings(struct bnx2x *bp)
1303{
1304 int func = BP_FUNC(bp);
523224a3 1305 u16 ring_prod;
9f6c9258 1306 int i, j;
25141580 1307
b3b83c3f 1308 /* Allocate TPA resources */
55c11941 1309 for_each_eth_queue(bp, j) {
523224a3 1310 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1311
a8c94b91
VZ
1312 DP(NETIF_MSG_IFUP,
1313 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1314
523224a3 1315 if (!fp->disable_tpa) {
16a5fd92 1316 /* Fill the per-aggregation pool */
dfacf138 1317 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1318 struct bnx2x_agg_info *tpa_info =
1319 &fp->tpa_info[i];
1320 struct sw_rx_bd *first_buf =
1321 &tpa_info->first_buf;
1322
996dedba
MS
1323 first_buf->data =
1324 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1325 if (!first_buf->data) {
51c1a580
MS
1326 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1327 j);
9f6c9258
DK
1328 bnx2x_free_tpa_pool(bp, fp, i);
1329 fp->disable_tpa = 1;
1330 break;
1331 }
619c5cb6
VZ
1332 dma_unmap_addr_set(first_buf, mapping, 0);
1333 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1334 }
523224a3
DK
1335
1336 /* "next page" elements initialization */
1337 bnx2x_set_next_page_sgl(fp);
1338
1339 /* set SGEs bit mask */
1340 bnx2x_init_sge_ring_bit_mask(fp);
1341
1342 /* Allocate SGEs and initialize the ring elements */
1343 for (i = 0, ring_prod = 0;
1344 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1345
996dedba
MS
1346 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1347 GFP_KERNEL) < 0) {
51c1a580
MS
1348 BNX2X_ERR("was only able to allocate %d rx sges\n",
1349 i);
1350 BNX2X_ERR("disabling TPA for queue[%d]\n",
1351 j);
523224a3 1352 /* Cleanup already allocated elements */
619c5cb6
VZ
1353 bnx2x_free_rx_sge_range(bp, fp,
1354 ring_prod);
1355 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1356 MAX_AGG_QS(bp));
523224a3
DK
1357 fp->disable_tpa = 1;
1358 ring_prod = 0;
1359 break;
1360 }
1361 ring_prod = NEXT_SGE_IDX(ring_prod);
1362 }
1363
1364 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1365 }
1366 }
1367
55c11941 1368 for_each_eth_queue(bp, j) {
9f6c9258
DK
1369 struct bnx2x_fastpath *fp = &bp->fp[j];
1370
1371 fp->rx_bd_cons = 0;
9f6c9258 1372
b3b83c3f
DK
1373 /* Activate BD ring */
1374 /* Warning!
1375 * this will generate an interrupt (to the TSTORM)
1376 * must only be done after chip is initialized
1377 */
1378 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1379 fp->rx_sge_prod);
9f6c9258 1380
9f6c9258
DK
1381 if (j != 0)
1382 continue;
1383
619c5cb6 1384 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1385 REG_WR(bp, BAR_USTRORM_INTMEM +
1386 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1387 U64_LO(fp->rx_comp_mapping));
1388 REG_WR(bp, BAR_USTRORM_INTMEM +
1389 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1390 U64_HI(fp->rx_comp_mapping));
1391 }
9f6c9258
DK
1392 }
1393}
f85582f8 1394
55c11941 1395static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1396{
6383c0b3 1397 u8 cos;
55c11941 1398 struct bnx2x *bp = fp->bp;
9f6c9258 1399
55c11941
MS
1400 for_each_cos_in_tx_queue(fp, cos) {
1401 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1402 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1403
55c11941
MS
1404 u16 sw_prod = txdata->tx_pkt_prod;
1405 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1406
55c11941
MS
1407 while (sw_cons != sw_prod) {
1408 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1409 &pkts_compl, &bytes_compl);
1410 sw_cons++;
9f6c9258 1411 }
55c11941
MS
1412
1413 netdev_tx_reset_queue(
1414 netdev_get_tx_queue(bp->dev,
1415 txdata->txq_index));
1416 }
1417}
1418
1419static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1420{
1421 int i;
1422
1423 for_each_tx_queue_cnic(bp, i) {
1424 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1425 }
1426}
1427
1428static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1429{
1430 int i;
1431
1432 for_each_eth_queue(bp, i) {
1433 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1434 }
1435}
1436
b3b83c3f
DK
1437static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1438{
1439 struct bnx2x *bp = fp->bp;
1440 int i;
1441
1442 /* ring wasn't allocated */
1443 if (fp->rx_buf_ring == NULL)
1444 return;
1445
1446 for (i = 0; i < NUM_RX_BD; i++) {
1447 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1448 u8 *data = rx_buf->data;
b3b83c3f 1449
e52fcb24 1450 if (data == NULL)
b3b83c3f 1451 continue;
b3b83c3f
DK
1452 dma_unmap_single(&bp->pdev->dev,
1453 dma_unmap_addr(rx_buf, mapping),
1454 fp->rx_buf_size, DMA_FROM_DEVICE);
1455
e52fcb24 1456 rx_buf->data = NULL;
d46d132c 1457 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1458 }
1459}
1460
55c11941
MS
1461static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1462{
1463 int j;
1464
1465 for_each_rx_queue_cnic(bp, j) {
1466 bnx2x_free_rx_bds(&bp->fp[j]);
1467 }
1468}
1469
9f6c9258
DK
1470static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1471{
b3b83c3f 1472 int j;
9f6c9258 1473
55c11941 1474 for_each_eth_queue(bp, j) {
9f6c9258
DK
1475 struct bnx2x_fastpath *fp = &bp->fp[j];
1476
b3b83c3f 1477 bnx2x_free_rx_bds(fp);
9f6c9258 1478
9f6c9258 1479 if (!fp->disable_tpa)
dfacf138 1480 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1481 }
1482}
1483
55c11941
MS
1484void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1485{
1486 bnx2x_free_tx_skbs_cnic(bp);
1487 bnx2x_free_rx_skbs_cnic(bp);
1488}
1489
9f6c9258
DK
1490void bnx2x_free_skbs(struct bnx2x *bp)
1491{
1492 bnx2x_free_tx_skbs(bp);
1493 bnx2x_free_rx_skbs(bp);
1494}
1495
e3835b99
DK
1496void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1497{
1498 /* load old values */
1499 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1500
1501 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1502 /* leave all but MAX value */
1503 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1504
1505 /* set new MAX value */
1506 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1507 & FUNC_MF_CFG_MAX_BW_MASK;
1508
1509 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1510 }
1511}
1512
ca92429f
DK
1513/**
1514 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1515 *
1516 * @bp: driver handle
1517 * @nvecs: number of vectors to be released
1518 */
1519static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1520{
ca92429f 1521 int i, offset = 0;
9f6c9258 1522
ca92429f
DK
1523 if (nvecs == offset)
1524 return;
ad5afc89
AE
1525
1526 /* VFs don't have a default SB */
1527 if (IS_PF(bp)) {
1528 free_irq(bp->msix_table[offset].vector, bp->dev);
1529 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1530 bp->msix_table[offset].vector);
1531 offset++;
1532 }
55c11941
MS
1533
1534 if (CNIC_SUPPORT(bp)) {
1535 if (nvecs == offset)
1536 return;
1537 offset++;
1538 }
ca92429f 1539
ec6ba945 1540 for_each_eth_queue(bp, i) {
ca92429f
DK
1541 if (nvecs == offset)
1542 return;
51c1a580
MS
1543 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1544 i, bp->msix_table[offset].vector);
9f6c9258 1545
ca92429f 1546 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1547 }
1548}
1549
d6214d7a 1550void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1551{
30a5de77 1552 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1553 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1554 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1555
1556 /* vfs don't have a default status block */
1557 if (IS_PF(bp))
1558 nvecs++;
1559
1560 bnx2x_free_msix_irqs(bp, nvecs);
1561 } else {
30a5de77 1562 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1563 }
9f6c9258
DK
1564}
1565
0e8d2ec5 1566int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1567{
1ab4434c 1568 int msix_vec = 0, i, rc;
9f6c9258 1569
1ab4434c
AE
1570 /* VFs don't have a default status block */
1571 if (IS_PF(bp)) {
1572 bp->msix_table[msix_vec].entry = msix_vec;
1573 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1574 bp->msix_table[0].entry);
1575 msix_vec++;
1576 }
9f6c9258 1577
55c11941
MS
1578 /* Cnic requires an msix vector for itself */
1579 if (CNIC_SUPPORT(bp)) {
1580 bp->msix_table[msix_vec].entry = msix_vec;
1581 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1582 msix_vec, bp->msix_table[msix_vec].entry);
1583 msix_vec++;
1584 }
1585
6383c0b3 1586 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1587 for_each_eth_queue(bp, i) {
d6214d7a 1588 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1589 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1590 msix_vec, msix_vec, i);
d6214d7a 1591 msix_vec++;
9f6c9258
DK
1592 }
1593
1ab4434c
AE
1594 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1595 msix_vec);
d6214d7a 1596
1ab4434c 1597 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1598
1599 /*
1600 * reconfigure number of tx/rx queues according to available
1601 * MSI-X vectors
1602 */
55c11941 1603 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1604 /* how less vectors we will have? */
1ab4434c 1605 int diff = msix_vec - rc;
9f6c9258 1606
51c1a580 1607 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1608
1609 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1610
1611 if (rc) {
30a5de77
DK
1612 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1613 goto no_msix;
9f6c9258 1614 }
d6214d7a
DK
1615 /*
1616 * decrease number of queues by number of unallocated entries
1617 */
55c11941
MS
1618 bp->num_ethernet_queues -= diff;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1620
51c1a580 1621 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1622 bp->num_queues);
1623 } else if (rc > 0) {
1624 /* Get by with single vector */
1625 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1626 if (rc) {
1627 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1628 rc);
1629 goto no_msix;
1630 }
1631
1632 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1633 bp->flags |= USING_SINGLE_MSIX_FLAG;
1634
55c11941
MS
1635 BNX2X_DEV_INFO("set number of queues to 1\n");
1636 bp->num_ethernet_queues = 1;
1637 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1638 } else if (rc < 0) {
51c1a580 1639 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1640 goto no_msix;
9f6c9258
DK
1641 }
1642
1643 bp->flags |= USING_MSIX_FLAG;
1644
1645 return 0;
30a5de77
DK
1646
1647no_msix:
1648 /* fall to INTx if not enough memory */
1649 if (rc == -ENOMEM)
1650 bp->flags |= DISABLE_MSI_FLAG;
1651
1652 return rc;
9f6c9258
DK
1653}
1654
1655static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1656{
ca92429f 1657 int i, rc, offset = 0;
9f6c9258 1658
ad5afc89
AE
1659 /* no default status block for vf */
1660 if (IS_PF(bp)) {
1661 rc = request_irq(bp->msix_table[offset++].vector,
1662 bnx2x_msix_sp_int, 0,
1663 bp->dev->name, bp->dev);
1664 if (rc) {
1665 BNX2X_ERR("request sp irq failed\n");
1666 return -EBUSY;
1667 }
9f6c9258
DK
1668 }
1669
55c11941
MS
1670 if (CNIC_SUPPORT(bp))
1671 offset++;
1672
ec6ba945 1673 for_each_eth_queue(bp, i) {
9f6c9258
DK
1674 struct bnx2x_fastpath *fp = &bp->fp[i];
1675 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1676 bp->dev->name, i);
1677
d6214d7a 1678 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1679 bnx2x_msix_fp_int, 0, fp->name, fp);
1680 if (rc) {
ca92429f
DK
1681 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1682 bp->msix_table[offset].vector, rc);
1683 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1684 return -EBUSY;
1685 }
1686
d6214d7a 1687 offset++;
9f6c9258
DK
1688 }
1689
ec6ba945 1690 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1691 if (IS_PF(bp)) {
1692 offset = 1 + CNIC_SUPPORT(bp);
1693 netdev_info(bp->dev,
1694 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1695 bp->msix_table[0].vector,
1696 0, bp->msix_table[offset].vector,
1697 i - 1, bp->msix_table[offset + i - 1].vector);
1698 } else {
1699 offset = CNIC_SUPPORT(bp);
1700 netdev_info(bp->dev,
1701 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1702 0, bp->msix_table[offset].vector,
1703 i - 1, bp->msix_table[offset + i - 1].vector);
1704 }
9f6c9258
DK
1705 return 0;
1706}
1707
d6214d7a 1708int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1709{
1710 int rc;
1711
1712 rc = pci_enable_msi(bp->pdev);
1713 if (rc) {
51c1a580 1714 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1715 return -1;
1716 }
1717 bp->flags |= USING_MSI_FLAG;
1718
1719 return 0;
1720}
1721
1722static int bnx2x_req_irq(struct bnx2x *bp)
1723{
1724 unsigned long flags;
30a5de77 1725 unsigned int irq;
9f6c9258 1726
30a5de77 1727 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1728 flags = 0;
1729 else
1730 flags = IRQF_SHARED;
1731
30a5de77
DK
1732 if (bp->flags & USING_MSIX_FLAG)
1733 irq = bp->msix_table[0].vector;
1734 else
1735 irq = bp->pdev->irq;
1736
1737 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1738}
1739
c957d09f 1740static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1741{
1742 int rc = 0;
30a5de77
DK
1743 if (bp->flags & USING_MSIX_FLAG &&
1744 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1745 rc = bnx2x_req_msix_irqs(bp);
1746 if (rc)
1747 return rc;
1748 } else {
619c5cb6
VZ
1749 rc = bnx2x_req_irq(bp);
1750 if (rc) {
1751 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1752 return rc;
1753 }
1754 if (bp->flags & USING_MSI_FLAG) {
1755 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1756 netdev_info(bp->dev, "using MSI IRQ %d\n",
1757 bp->dev->irq);
1758 }
1759 if (bp->flags & USING_MSIX_FLAG) {
1760 bp->dev->irq = bp->msix_table[0].vector;
1761 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1762 bp->dev->irq);
619c5cb6
VZ
1763 }
1764 }
1765
1766 return 0;
1767}
1768
55c11941
MS
1769static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1770{
1771 int i;
1772
8f20aa57
DK
1773 for_each_rx_queue_cnic(bp, i) {
1774 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1775 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1776 }
55c11941
MS
1777}
1778
1191cb83 1779static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1780{
1781 int i;
1782
8f20aa57
DK
1783 for_each_eth_queue(bp, i) {
1784 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1785 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1786 }
9f6c9258
DK
1787}
1788
55c11941
MS
1789static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1790{
1791 int i;
1792
8f20aa57
DK
1793 local_bh_disable();
1794 for_each_rx_queue_cnic(bp, i) {
55c11941 1795 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1796 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1797 mdelay(1);
1798 }
1799 local_bh_enable();
55c11941
MS
1800}
1801
1191cb83 1802static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1803{
1804 int i;
1805
8f20aa57
DK
1806 local_bh_disable();
1807 for_each_eth_queue(bp, i) {
9f6c9258 1808 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1809 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1810 mdelay(1);
1811 }
1812 local_bh_enable();
9f6c9258
DK
1813}
1814
1815void bnx2x_netif_start(struct bnx2x *bp)
1816{
4b7ed897
DK
1817 if (netif_running(bp->dev)) {
1818 bnx2x_napi_enable(bp);
55c11941
MS
1819 if (CNIC_LOADED(bp))
1820 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1821 bnx2x_int_enable(bp);
1822 if (bp->state == BNX2X_STATE_OPEN)
1823 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1824 }
1825}
1826
1827void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1828{
1829 bnx2x_int_disable_sync(bp, disable_hw);
1830 bnx2x_napi_disable(bp);
55c11941
MS
1831 if (CNIC_LOADED(bp))
1832 bnx2x_napi_disable_cnic(bp);
9f6c9258 1833}
9f6c9258 1834
8307fa3e
VZ
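/* Transmit queue selection: FCoE and FIP frames are steered to the
 * dedicated FCoE ring when CNIC is loaded; all other traffic is spread
 * over the ETH queues using the stack's default pick.
 */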
1835u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1836{
8307fa3e 1837 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1838
55c11941 1839 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1840 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1841 u16 ether_type = ntohs(hdr->h_proto);
1842
1843 /* Skip VLAN tag if present */
1844 if (ether_type == ETH_P_8021Q) {
1845 struct vlan_ethhdr *vhdr =
1846 (struct vlan_ethhdr *)skb->data;
1847
1848 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1849 }
1850
1851 /* If ethertype is FCoE or FIP - use FCoE ring */
1852 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1853 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1854 }
55c11941 1855
cdb9d6ae 1856 /* select a non-FCoE queue */
ada7c19e 1857 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1858}
1859
d6214d7a
DK
1860void bnx2x_set_num_queues(struct bnx2x *bp)
1861{
96305234 1862 /* RSS queues */
55c11941 1863 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1864
a3348722
BW
1865 /* override in STORAGE SD modes */
1866 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1867 bp->num_ethernet_queues = 1;
1868
ec6ba945 1869 /* Add special queues */
55c11941
MS
1870 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1871 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1872
1873 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1874}
1875
cdb9d6ae
VZ
1876/**
1877 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1878 *
1879 * @bp: Driver handle
1880 *
1881 * We currently support at most 16 Tx queues for each CoS, thus we will
1882 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1883 * bp->max_cos.
1884 *
1885 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1886 * index after all ETH L2 indices.
1887 *
1888 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1889 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1890 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1891 *
1892 * The proper configuration of skb->queue_mapping is handled by
1893 * bnx2x_select_queue() and __skb_tx_hash().
1894 *
1895 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1896 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1897 */
55c11941 1898static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1899{
6383c0b3 1900 int rc, tx, rx;
ec6ba945 1901
65565884 1902 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1903 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1904
6383c0b3 1905/* account for fcoe queue */
55c11941
MS
1906 if (include_cnic && !NO_FCOE(bp)) {
1907 rx++;
1908 tx++;
6383c0b3 1909 }
6383c0b3
AE
1910
1911 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1912 if (rc) {
1913 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1914 return rc;
1915 }
1916 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1917 if (rc) {
1918 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1919 return rc;
1920 }
1921
51c1a580 1922 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1923 tx, rx);
1924
ec6ba945
VZ
1925 return rc;
1926}
1927
1191cb83 1928static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1929{
1930 int i;
1931
1932 for_each_queue(bp, i) {
1933 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1934 u32 mtu;
a8c94b91
VZ
1935
1936 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1937 if (IS_FCOE_IDX(i))
1938 /*
1939 * Although there are no IP frames expected to arrive on
1940 * this ring, we still want to add an
1941 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1942 * overrun attack.
1943 */
e52fcb24 1944 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1945 else
e52fcb24
ED
1946 mtu = bp->dev->mtu;
1947 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1948 IP_HEADER_ALIGNMENT_PADDING +
1949 ETH_OVREHEAD +
1950 mtu +
1951 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1952 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1953 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1954 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1955 else
1956 fp->rx_frag_size = 0;
a8c94b91
VZ
1957 }
1958}
1959
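/* Seed the RSS indirection table with the default ethtool spreading over
 * the ETH queues and push the configuration to the device.
 */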
60cad4e6 1960static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1961{
1962 int i;
619c5cb6
VZ
1963 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1964
16a5fd92 1965 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
1966 * enabled
1967 */
5d317c6a
MS
1968 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1969 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1970 bp->fp->cl_id +
1971 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1972
1973 /*
1974 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1975 * per-port, so if explicit configuration is needed, do it only
1976 * for a PMF.
1977 *
1978 * For 57712 and newer on the other hand it's a per-function
1979 * configuration.
1980 */
5d317c6a 1981 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1982}
1983
60cad4e6
AE
1984int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1985 bool config_hash, bool enable)
619c5cb6 1986{
3b603066 1987 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1988
1989 /* Although RSS is meaningless when there is a single HW queue we
1990 * still need it enabled in order to have HW Rx hash generated.
1991 *
1992 * if (!is_eth_multi(bp))
1993 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1994 */
1995
96305234 1996 params.rss_obj = rss_obj;
619c5cb6
VZ
1997
1998 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1999
60cad4e6
AE
2000 if (enable) {
2001 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2002
2003 /* RSS configuration */
2004 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2005 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2006 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2007 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2008 if (rss_obj->udp_rss_v4)
2009 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2010 if (rss_obj->udp_rss_v6)
2011 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2012 } else {
2013 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2014 }
619c5cb6 2015
96305234
DK
2016 /* Hash bits */
2017 params.rss_result_mask = MULTI_MASK;
619c5cb6 2018
5d317c6a 2019 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2020
96305234
DK
2021 if (config_hash) {
2022 /* RSS keys */
60cad4e6 2023 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2024 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2025 }
2026
60cad4e6
AE
2027 if (IS_PF(bp))
2028 return bnx2x_config_rss(bp, &params);
2029 else
2030 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2031}
2032
1191cb83 2033static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2034{
3b603066 2035 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2036
2037 /* Prepare parameters for function state transitions */
2038 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2039
2040 func_params.f_obj = &bp->func_obj;
2041 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2042
2043 func_params.params.hw_init.load_phase = load_code;
2044
2045 return bnx2x_func_state_change(bp, &func_params);
2046}
2047
2048/*
2049 * Cleans the objects that have internal lists without sending
16a5fd92 2050 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2051 */
7fa6f340 2052void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2053{
2054 int rc;
2055 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2056 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2057 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2058
2059 /***************** Cleanup MACs' object first *************************/
2060
2061 /* Wait for completion of the requested commands */
2062 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2063 /* Perform a dry cleanup */
2064 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2065
2066 /* Clean ETH primary MAC */
2067 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2068 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2069 &ramrod_flags);
2070 if (rc != 0)
2071 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2072
2073 /* Cleanup UC list */
2074 vlan_mac_flags = 0;
2075 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2076 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2077 &ramrod_flags);
2078 if (rc != 0)
2079 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2080
2081 /***************** Now clean mcast object *****************************/
2082 rparam.mcast_obj = &bp->mcast_obj;
2083 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2084
8b09be5f
YM
2085 /* Add a DEL command... - Since we're doing a driver cleanup only,
2086 * we take a lock surrounding both the initial send and the CONTs,
2087 * as we don't want a true completion to disrupt us in the middle.
2088 */
2089 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2090 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2091 if (rc < 0)
51c1a580
MS
2092 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2093 rc);
619c5cb6
VZ
2094
2095 /* ...and wait until all pending commands are cleared */
2096 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2097 while (rc != 0) {
2098 if (rc < 0) {
2099 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2100 rc);
8b09be5f 2101 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2102 return;
2103 }
2104
2105 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2106 }
8b09be5f 2107 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2108}
2109
2110#ifndef BNX2X_STOP_ON_ERROR
2111#define LOAD_ERROR_EXIT(bp, label) \
2112 do { \
2113 (bp)->state = BNX2X_STATE_ERROR; \
2114 goto label; \
2115 } while (0)
55c11941
MS
2116
2117#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2118 do { \
2119 bp->cnic_loaded = false; \
2120 goto label; \
2121 } while (0)
2122#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2123#define LOAD_ERROR_EXIT(bp, label) \
2124 do { \
2125 (bp)->state = BNX2X_STATE_ERROR; \
2126 (bp)->panic = 1; \
2127 return -EBUSY; \
2128 } while (0)
55c11941
MS
2129#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2130 do { \
2131 bp->cnic_loaded = false; \
2132 (bp)->panic = 1; \
2133 return -EBUSY; \
2134 } while (0)
2135#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2136
ad5afc89
AE
2137static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2138{
2139 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2140 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2141 return;
2142}
2143
2144static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2145{
8db573ba 2146 int num_groups, vf_headroom = 0;
ad5afc89 2147 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2148
ad5afc89
AE
2149 /* number of queues for statistics is number of eth queues + FCoE */
2150 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2151
ad5afc89
AE
2152 /* Total number of FW statistics requests =
2153 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2154 * and fcoe l2 queue) stats + num of queues (which includes another 1
2155 * for fcoe l2 queue if applicable)
2156 */
2157 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2158
8db573ba
AE
2159 /* vf stats appear in the request list, but their data is allocated by
2160 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2161 * it is used to determine where to place the vf stats queries in the
2162 * request struct
2163 */
2164 if (IS_SRIOV(bp))
6411280a 2165 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2166
ad5afc89
AE
2167 /* Request is built from stats_query_header and an array of
2168 * stats_query_cmd_group each of which contains
2169 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2170 * configured in the stats_query_header.
2171 */
2172 num_groups =
8db573ba
AE
2173 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2174 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2175 1 : 0));
2176
8db573ba
AE
2177 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2178 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2179 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2180 num_groups * sizeof(struct stats_query_cmd_group);
2181
2182 /* Data for statistics requests + stats_counter
2183 * stats_counter holds per-STORM counters that are incremented
2184 * when STORM has finished with the current request.
2185 * memory for FCoE offloaded statistics is counted anyway,
2186 * even if they will not be sent.
2187 * VF stats are not accounted for here as the data of VF stats is stored
2188 * in memory allocated by the VF, not here.
2189 */
2190 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2191 sizeof(struct per_pf_stats) +
2192 sizeof(struct fcoe_statistics_params) +
2193 sizeof(struct per_queue_stats) * num_queue_stats +
2194 sizeof(struct stats_counter);
2195
2196 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2197 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2198
2199 /* Set shortcuts */
2200 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2201 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2202 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2203 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2204 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2205 bp->fw_stats_req_sz;
2206
6bf07b8e 2207 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2208 U64_HI(bp->fw_stats_req_mapping),
2209 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2210 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2211 U64_HI(bp->fw_stats_data_mapping),
2212 U64_LO(bp->fw_stats_data_mapping));
2213 return 0;
2214
2215alloc_mem_err:
2216 bnx2x_free_fw_stats_mem(bp);
2217 BNX2X_ERR("Can't allocate FW stats memory\n");
2218 return -ENOMEM;
2219}
2220
2221/* send load request to mcp and analyze response */
2222static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2223{
178135c1
DK
2224 u32 param;
2225
ad5afc89
AE
2226 /* init fw_seq */
2227 bp->fw_seq =
2228 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2229 DRV_MSG_SEQ_NUMBER_MASK);
2230 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2231
2232 /* Get current FW pulse sequence */
2233 bp->fw_drv_pulse_wr_seq =
2234 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2235 DRV_PULSE_SEQ_MASK);
2236 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2237
178135c1
DK
2238 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2239
2240 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2241 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2242
ad5afc89 2243 /* load request */
178135c1 2244 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2245
2246 /* if mcp fails to respond we must abort */
2247 if (!(*load_code)) {
2248 BNX2X_ERR("MCP response failure, aborting\n");
2249 return -EBUSY;
2250 }
2251
2252 /* If mcp refused (e.g. other port is in diagnostic mode) we
2253 * must abort
2254 */
2255 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2256 BNX2X_ERR("MCP refused load request, aborting\n");
2257 return -EBUSY;
2258 }
2259 return 0;
2260}
2261
2262/* check whether another PF has already loaded FW to chip. In
2263 * virtualized environments a pf from another VM may have already
2264 * initialized the device including loading FW
2265 */
2266int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2267{
2268 /* is another pf loaded on this engine? */
2269 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2270 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2271 /* build my FW version dword */
2272 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2273 (BCM_5710_FW_MINOR_VERSION << 8) +
2274 (BCM_5710_FW_REVISION_VERSION << 16) +
2275 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2276
2277 /* read loaded FW from chip */
2278 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2279
2280 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2281 loaded_fw, my_fw);
2282
2283 /* abort nic load if version mismatch */
2284 if (my_fw != loaded_fw) {
6bf07b8e 2285 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
452427b0 2286 loaded_fw, my_fw);
ad5afc89
AE
2287 return -EBUSY;
2288 }
2289 }
2290 return 0;
2291}
2292
2293/* returns the "mcp load_code" according to global load_count array */
2294static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2295{
2296 int path = BP_PATH(bp);
2297
2298 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2299 path, load_count[path][0], load_count[path][1],
2300 load_count[path][2]);
2301 load_count[path][0]++;
2302 load_count[path][1 + port]++;
2303 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2304 path, load_count[path][0], load_count[path][1],
2305 load_count[path][2]);
2306 if (load_count[path][0] == 1)
2307 return FW_MSG_CODE_DRV_LOAD_COMMON;
2308 else if (load_count[path][1 + port] == 1)
2309 return FW_MSG_CODE_DRV_LOAD_PORT;
2310 else
2311 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2312}
2313
2314/* mark PMF if applicable */
2315static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2316{
2317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2318 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2319 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2320 bp->port.pmf = 1;
2321 /* We need the barrier to ensure the ordering between the
2322 * writing to bp->port.pmf here and reading it from the
2323 * bnx2x_periodic_task().
2324 */
2325 smp_mb();
2326 } else {
2327 bp->port.pmf = 0;
452427b0
YM
2328 }
2329
ad5afc89
AE
2330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2331}
2332
2333static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2334{
2335 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2336 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2337 (bp->common.shmem2_base)) {
2338 if (SHMEM2_HAS(bp, dcc_support))
2339 SHMEM2_WR(bp, dcc_support,
2340 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2341 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2342 if (SHMEM2_HAS(bp, afex_driver_support))
2343 SHMEM2_WR(bp, afex_driver_support,
2344 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2345 }
2346
2347 /* Set AFEX default VLAN tag to an invalid value */
2348 bp->afex_def_vlan_tag = -1;
452427b0
YM
2349}
2350
1191cb83
ED
2351/**
2352 * bnx2x_bz_fp - zero content of the fastpath structure.
2353 *
2354 * @bp: driver handle
2355 * @index: fastpath index to be zeroed
2356 *
2357 * Makes sure the contents of the bp->fp[index].napi is kept
2358 * intact.
2359 */
2360static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2361{
2362 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2363 int cos;
1191cb83 2364 struct napi_struct orig_napi = fp->napi;
15192a8c 2365 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2366
1191cb83 2367 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2368 if (fp->tpa_info)
2369 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2370 sizeof(struct bnx2x_agg_info));
2371 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2372
2373 /* Restore the NAPI object as it has been already initialized */
2374 fp->napi = orig_napi;
15192a8c 2375 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2376 fp->bp = bp;
2377 fp->index = index;
2378 if (IS_ETH_FP(fp))
2379 fp->max_cos = bp->max_cos;
2380 else
2381 /* Special queues support only one CoS */
2382 fp->max_cos = 1;
2383
65565884 2384 /* Init txdata pointers */
65565884
MS
2385 if (IS_FCOE_FP(fp))
2386 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2387 if (IS_ETH_FP(fp))
2388 for_each_cos_in_tx_queue(fp, cos)
2389 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2390 BNX2X_NUM_ETH_QUEUES(bp) + index];
2391
16a5fd92 2392 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2393 * minimal size so it must be set prior to queue memory allocation
2394 */
2395 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2396 (bp->flags & GRO_ENABLE_FLAG &&
2397 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2398 if (bp->flags & TPA_ENABLE_FLAG)
2399 fp->mode = TPA_MODE_LRO;
2400 else if (bp->flags & GRO_ENABLE_FLAG)
2401 fp->mode = TPA_MODE_GRO;
2402
1191cb83
ED
2403 /* We don't want TPA on an FCoE L2 ring */
2404 if (IS_FCOE_FP(fp))
2405 fp->disable_tpa = 1;
55c11941
MS
2406}
2407
2408int bnx2x_load_cnic(struct bnx2x *bp)
2409{
2410 int i, rc, port = BP_PORT(bp);
2411
2412 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2413
2414 mutex_init(&bp->cnic_mutex);
2415
ad5afc89
AE
2416 if (IS_PF(bp)) {
2417 rc = bnx2x_alloc_mem_cnic(bp);
2418 if (rc) {
2419 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2420 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2421 }
55c11941
MS
2422 }
2423
2424 rc = bnx2x_alloc_fp_mem_cnic(bp);
2425 if (rc) {
2426 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2427 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2428 }
2429
2430 /* Update the number of queues with the cnic queues */
2431 rc = bnx2x_set_real_num_queues(bp, 1);
2432 if (rc) {
2433 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2434 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2435 }
2436
2437 /* Add all CNIC NAPI objects */
2438 bnx2x_add_all_napi_cnic(bp);
2439 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2440 bnx2x_napi_enable_cnic(bp);
2441
2442 rc = bnx2x_init_hw_func_cnic(bp);
2443 if (rc)
2444 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2445
2446 bnx2x_nic_init_cnic(bp);
2447
ad5afc89
AE
2448 if (IS_PF(bp)) {
2449 /* Enable Timer scan */
2450 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2451
2452 /* setup cnic queues */
2453 for_each_cnic_queue(bp, i) {
2454 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2455 if (rc) {
2456 BNX2X_ERR("Queue setup failed\n");
2457 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2458 }
55c11941
MS
2459 }
2460 }
2461
2462 /* Initialize Rx filter. */
8b09be5f 2463 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2464
2465 /* re-read iscsi info */
2466 bnx2x_get_iscsi_info(bp);
2467 bnx2x_setup_cnic_irq_info(bp);
2468 bnx2x_setup_cnic_info(bp);
2469 bp->cnic_loaded = true;
2470 if (bp->state == BNX2X_STATE_OPEN)
2471 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2472
55c11941
MS
2473 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2474
2475 return 0;
2476
2477#ifndef BNX2X_STOP_ON_ERROR
2478load_error_cnic2:
2479 /* Disable Timer scan */
2480 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2481
2482load_error_cnic1:
2483 bnx2x_napi_disable_cnic(bp);
2484 /* Update the number of queues without the cnic queues */
d9d81862 2485 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2486 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2487load_error_cnic0:
2488 BNX2X_ERR("CNIC-related load failed\n");
2489 bnx2x_free_fp_mem_cnic(bp);
2490 bnx2x_free_mem_cnic(bp);
2491 return rc;
2492#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2493}
2494
9f6c9258
DK
2495/* must be called with rtnl_lock */
2496int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2497{
619c5cb6 2498 int port = BP_PORT(bp);
ad5afc89 2499 int i, rc = 0, load_code = 0;
9f6c9258 2500
55c11941
MS
2501 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2502 DP(NETIF_MSG_IFUP,
2503 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2504
9f6c9258 2505#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2506 if (unlikely(bp->panic)) {
2507 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2508 return -EPERM;
51c1a580 2509 }
9f6c9258
DK
2510#endif
2511
2512 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2513
16a5fd92 2514 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2515 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2516 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2517 &bp->last_reported_link.link_report_flags);
2ae17f66 2518
ad5afc89
AE
2519 if (IS_PF(bp))
2520 /* must be called before memory allocation and HW init */
2521 bnx2x_ilt_set_info(bp);
523224a3 2522
6383c0b3
AE
2523 /*
2524 * Zero fastpath structures preserving invariants like napi, which are
2525 * allocated only once, fp index, max_cos, bp pointer.
65565884 2526 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2527 */
51c1a580 2528 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2529 for_each_queue(bp, i)
2530 bnx2x_bz_fp(bp, i);
55c11941
MS
2531 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2532 bp->num_cnic_queues) *
2533 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2534
55c11941 2535 bp->fcoe_init = false;
6383c0b3 2536
a8c94b91
VZ
2537 /* Set the receive queues buffer size */
2538 bnx2x_set_rx_buf_size(bp);
2539
ad5afc89
AE
2540 if (IS_PF(bp)) {
2541 rc = bnx2x_alloc_mem(bp);
2542 if (rc) {
2543 BNX2X_ERR("Unable to allocate bp memory\n");
2544 return rc;
2545 }
2546 }
2547
2548 /* Allocate memory for FW statistics */
2549 if (bnx2x_alloc_fw_stats_mem(bp))
2550 LOAD_ERROR_EXIT(bp, load_error0);
2551
2552 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2553 * of memory available for RSS queues
2554 */
2555 rc = bnx2x_alloc_fp_mem(bp);
2556 if (rc) {
2557 BNX2X_ERR("Unable to allocate memory for fps\n");
2558 LOAD_ERROR_EXIT(bp, load_error0);
2559 }
d6214d7a 2560
8d9ac297
AE
2561 /* request pf to initialize status blocks */
2562 if (IS_VF(bp)) {
2563 rc = bnx2x_vfpf_init(bp);
2564 if (rc)
2565 LOAD_ERROR_EXIT(bp, load_error0);
2566 }
2567
b3b83c3f
DK
2568 /* Since bnx2x_alloc_mem() may possibly update
2569 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2570 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2571 */
55c11941 2572 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2573 if (rc) {
ec6ba945 2574 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2575 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2576 }
2577
6383c0b3 2578 /* configure multi cos mappings in kernel.
16a5fd92
YM
2579 * this configuration may be overridden by a multi class queue
2580 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2581 */
2582 bnx2x_setup_tc(bp->dev, bp->max_cos);
2583
26614ba5
MS
2584 /* Add all NAPI objects */
2585 bnx2x_add_all_napi(bp);
55c11941 2586 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2587 bnx2x_napi_enable(bp);
2588
ad5afc89
AE
2589 if (IS_PF(bp)) {
2590 /* set pf load just before approaching the MCP */
2591 bnx2x_set_pf_load(bp);
2592
2593 /* if mcp exists send load request and analyze response */
2594 if (!BP_NOMCP(bp)) {
2595 /* attempt to load pf */
2596 rc = bnx2x_nic_load_request(bp, &load_code);
2597 if (rc)
2598 LOAD_ERROR_EXIT(bp, load_error1);
2599
2600 /* what did mcp say? */
2601 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2602 if (rc) {
2603 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2604 LOAD_ERROR_EXIT(bp, load_error2);
2605 }
ad5afc89
AE
2606 } else {
2607 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2608 }
9f6c9258 2609
ad5afc89
AE
2610 /* mark pmf if applicable */
2611 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2612
ad5afc89
AE
2613 /* Init Function state controlling object */
2614 bnx2x__init_func_obj(bp);
6383c0b3 2615
ad5afc89
AE
2616 /* Initialize HW */
2617 rc = bnx2x_init_hw(bp, load_code);
2618 if (rc) {
2619 BNX2X_ERR("HW init failed, aborting\n");
2620 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2621 LOAD_ERROR_EXIT(bp, load_error2);
2622 }
9f6c9258
DK
2623 }
2624
ecf01c22
YM
2625 bnx2x_pre_irq_nic_init(bp);
2626
d6214d7a
DK
2627 /* Connect to IRQs */
2628 rc = bnx2x_setup_irqs(bp);
523224a3 2629 if (rc) {
ad5afc89
AE
2630 BNX2X_ERR("setup irqs failed\n");
2631 if (IS_PF(bp))
2632 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2633 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2634 }
2635
619c5cb6 2636 /* Init per-function objects */
ad5afc89 2637 if (IS_PF(bp)) {
ecf01c22
YM
2638 /* Setup NIC internals and enable interrupts */
2639 bnx2x_post_irq_nic_init(bp, load_code);
2640
ad5afc89 2641 bnx2x_init_bp_objs(bp);
b56e9670 2642 bnx2x_iov_nic_init(bp);
a3348722 2643
ad5afc89
AE
2644 /* Set AFEX default VLAN tag to an invalid value */
2645 bp->afex_def_vlan_tag = -1;
2646 bnx2x_nic_load_afex_dcc(bp, load_code);
2647 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2648 rc = bnx2x_func_start(bp);
2649 if (rc) {
2650 BNX2X_ERR("Function start failed!\n");
2651 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2652
619c5cb6 2653 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2654 }
9f6c9258 2655
ad5afc89
AE
2656 /* Send LOAD_DONE command to MCP */
2657 if (!BP_NOMCP(bp)) {
2658 load_code = bnx2x_fw_command(bp,
2659 DRV_MSG_CODE_LOAD_DONE, 0);
2660 if (!load_code) {
2661 BNX2X_ERR("MCP response failure, aborting\n");
2662 rc = -EBUSY;
2663 LOAD_ERROR_EXIT(bp, load_error3);
2664 }
2665 }
9f6c9258 2666
0c14e5ce
AE
2667 /* initialize FW coalescing state machines in RAM */
2668 bnx2x_update_coalesce(bp);
60cad4e6 2669 }
0c14e5ce 2670
60cad4e6
AE
2671 /* setup the leading queue */
2672 rc = bnx2x_setup_leading(bp);
2673 if (rc) {
2674 BNX2X_ERR("Setup leading failed!\n");
2675 LOAD_ERROR_EXIT(bp, load_error3);
2676 }
ad5afc89 2677
60cad4e6
AE
2678 /* set up the rest of the queues */
2679 for_each_nondefault_eth_queue(bp, i) {
2680 if (IS_PF(bp))
2681 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2682 else /* VF */
2683 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2684 if (rc) {
60cad4e6 2685 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2686 LOAD_ERROR_EXIT(bp, load_error3);
2687 }
60cad4e6 2688 }
8d9ac297 2689
60cad4e6
AE
2690 /* setup rss */
2691 rc = bnx2x_init_rss(bp);
2692 if (rc) {
2693 BNX2X_ERR("PF RSS init failed\n");
2694 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2695 }
619c5cb6 2696
523224a3
DK
2697 /* Now when Clients are configured we are ready to work */
2698 bp->state = BNX2X_STATE_OPEN;
2699
619c5cb6 2700 /* Configure a ucast MAC */
ad5afc89
AE
2701 if (IS_PF(bp))
2702 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2703 else /* vf */
f8f4f61a
DK
2704 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2705 true);
51c1a580
MS
2706 if (rc) {
2707 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2708 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2709 }
6e30dd4e 2710
ad5afc89 2711 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2712 bnx2x_update_max_mf_config(bp, bp->pending_max);
2713 bp->pending_max = 0;
2714 }
2715
ad5afc89
AE
2716 if (bp->port.pmf) {
2717 rc = bnx2x_initial_phy_init(bp, load_mode);
2718 if (rc)
2719 LOAD_ERROR_EXIT(bp, load_error3);
2720 }
c63da990 2721 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2722
619c5cb6
VZ
2723 /* Start fast path */
2724
2725 /* Initialize Rx filter. */
8b09be5f 2726 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2727
619c5cb6 2728 /* Start the Tx */
9f6c9258
DK
2729 switch (load_mode) {
2730 case LOAD_NORMAL:
16a5fd92 2731 /* Tx queue should be only re-enabled */
523224a3 2732 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2733 break;
2734
2735 case LOAD_OPEN:
2736 netif_tx_start_all_queues(bp->dev);
523224a3 2737 smp_mb__after_clear_bit();
9f6c9258
DK
2738 break;
2739
2740 case LOAD_DIAG:
8970b2e4 2741 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2742 bp->state = BNX2X_STATE_DIAG;
2743 break;
2744
2745 default:
2746 break;
2747 }
2748
00253a8c 2749 if (bp->port.pmf)
4c704899 2750 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2751 else
9f6c9258
DK
2752 bnx2x__link_status_update(bp);
2753
2754 /* start the timer */
2755 mod_timer(&bp->timer, jiffies + bp->current_interval);
2756
55c11941
MS
2757 if (CNIC_ENABLED(bp))
2758 bnx2x_load_cnic(bp);
9f6c9258 2759
ad5afc89
AE
2760 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2761 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2762 u32 val;
2763 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2764 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2765 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2766 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2767 }
2768
619c5cb6 2769 /* Wait for all pending SP commands to complete */
ad5afc89 2770 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2771 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2772 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2773 return -EBUSY;
2774 }
6891dd25 2775
9876879f
BW
2776 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2777 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2778 bnx2x_dcbx_init(bp, false);
2779
55c11941
MS
2780 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2781
9f6c9258
DK
2782 return 0;
2783
619c5cb6 2784#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2785load_error3:
ad5afc89
AE
2786 if (IS_PF(bp)) {
2787 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2788
ad5afc89
AE
2789 /* Clean queueable objects */
2790 bnx2x_squeeze_objects(bp);
2791 }
619c5cb6 2792
9f6c9258
DK
2793 /* Free SKBs, SGEs, TPA pool and driver internals */
2794 bnx2x_free_skbs(bp);
ec6ba945 2795 for_each_rx_queue(bp, i)
9f6c9258 2796 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2797
9f6c9258 2798 /* Release IRQs */
d6214d7a
DK
2799 bnx2x_free_irq(bp);
2800load_error2:
ad5afc89 2801 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2802 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2803 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2804 }
2805
2806 bp->port.pmf = 0;
9f6c9258
DK
2807load_error1:
2808 bnx2x_napi_disable(bp);
722c6f58 2809 bnx2x_del_all_napi(bp);
ad5afc89 2810
889b9af3 2811 /* clear pf_load status, as it was already set */
ad5afc89
AE
2812 if (IS_PF(bp))
2813 bnx2x_clear_pf_load(bp);
d6214d7a 2814load_error0:
ad5afc89
AE
2815 bnx2x_free_fp_mem(bp);
2816 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2817 bnx2x_free_mem(bp);
2818
2819 return rc;
619c5cb6 2820#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2821}
2822
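/* Wait until every Tx ring (all CoS instances of each queue) has completed
 * its in-flight packets; returns non-zero if a ring fails to drain.
 */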
7fa6f340 2823int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2824{
2825 u8 rc = 0, cos, i;
2826
2827 /* Wait until tx fastpath tasks complete */
2828 for_each_tx_queue(bp, i) {
2829 struct bnx2x_fastpath *fp = &bp->fp[i];
2830
2831 for_each_cos_in_tx_queue(fp, cos)
2832 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2833 if (rc)
2834 return rc;
2835 }
2836 return 0;
2837}
2838
9f6c9258 2839/* must be called with rtnl_lock */
5d07d868 2840int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2841{
2842 int i;
c9ee9206
VZ
2843 bool global = false;
2844
55c11941
MS
2845 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2846
9ce392d4 2847 /* mark driver is unloaded in shmem2 */
ad5afc89 2848 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2849 u32 val;
2850 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2851 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2852 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2853 }
2854
80bfe5cc 2855 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2856 (bp->state == BNX2X_STATE_CLOSED ||
2857 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2858 /* We can get here if the driver has been unloaded
2859 * during parity error recovery and is either waiting for a
2860 * leader to complete or for other functions to unload and
2861 * then ifdown has been issued. In this case we want to
2862 * unload and let other functions to complete a recovery
2863 * process.
2864 */
9f6c9258
DK
2865 bp->recovery_state = BNX2X_RECOVERY_DONE;
2866 bp->is_leader = 0;
c9ee9206
VZ
2867 bnx2x_release_leader_lock(bp);
2868 smp_mb();
2869
51c1a580
MS
2870 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2871 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2872 return -EINVAL;
2873 }
2874
80bfe5cc 2875 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2876 * has not completed successfully - all resources are released.
80bfe5cc
YM
2877 *
2878 * we can get here only after an unsuccessful ndo_* callback, during which
2879 * dev->IFF_UP flag is still on.
2880 */
2881 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2882 return 0;
2883
2884 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2885 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2886 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2887 */
2888 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2889 smp_mb();
2890
78c3bcc5
AE
2891 /* indicate to VFs that the PF is going down */
2892 bnx2x_iov_channel_down(bp);
2893
55c11941
MS
2894 if (CNIC_LOADED(bp))
2895 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2896
9505ee37
VZ
2897 /* Stop Tx */
2898 bnx2x_tx_disable(bp);
65565884 2899 netdev_reset_tc(bp->dev);
9505ee37 2900
9f6c9258 2901 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2902
9f6c9258 2903 del_timer_sync(&bp->timer);
f85582f8 2904
ad5afc89
AE
2905 if (IS_PF(bp)) {
2906 /* Set ALWAYS_ALIVE bit in shmem */
2907 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2908 bnx2x_drv_pulse(bp);
2909 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2910 bnx2x_save_statistics(bp);
2911 }
9f6c9258 2912
ad5afc89
AE
2913 /* wait till consumers catch up with producers in all queues */
2914 bnx2x_drain_tx_queues(bp);
9f6c9258 2915
9b176b6b
AE
2916 /* if VF, indicate to PF that this function is going down (PF will delete sp
2917 * elements and clear initializations)
2918 */
2919 if (IS_VF(bp))
2920 bnx2x_vfpf_close_vf(bp);
2921 else if (unload_mode != UNLOAD_RECOVERY)
2922 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2923 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2924 else {
c9ee9206
VZ
2925 /* Send the UNLOAD_REQUEST to the MCP */
2926 bnx2x_send_unload_req(bp, unload_mode);
2927
16a5fd92 2928 /* Prevent transactions to host from the functions on the
c9ee9206 2929 * engine that doesn't reset global blocks in case of global
16a5fd92 2930 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2931 * (the engine which leader will perform the recovery
2932 * last).
2933 */
2934 if (!CHIP_IS_E1x(bp))
2935 bnx2x_pf_disable(bp);
2936
2937 /* Disable HW interrupts, NAPI */
523224a3 2938 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2939 /* Delete all NAPI objects */
2940 bnx2x_del_all_napi(bp);
55c11941
MS
2941 if (CNIC_LOADED(bp))
2942 bnx2x_del_all_napi_cnic(bp);
523224a3 2943 /* Release IRQs */
d6214d7a 2944 bnx2x_free_irq(bp);
c9ee9206
VZ
2945
2946 /* Report UNLOAD_DONE to MCP */
5d07d868 2947 bnx2x_send_unload_done(bp, false);
523224a3 2948 }
9f6c9258 2949
619c5cb6 2950 /*
16a5fd92 2951 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2952 * the queueable objects here in case they failed to get cleaned so far.
2953 */
ad5afc89
AE
2954 if (IS_PF(bp))
2955 bnx2x_squeeze_objects(bp);
619c5cb6 2956
79616895
VZ
2957 /* There should be no more pending SP commands at this stage */
2958 bp->sp_state = 0;
2959
9f6c9258
DK
2960 bp->port.pmf = 0;
2961
2962 /* Free SKBs, SGEs, TPA pool and driver internals */
2963 bnx2x_free_skbs(bp);
55c11941
MS
2964 if (CNIC_LOADED(bp))
2965 bnx2x_free_skbs_cnic(bp);
ec6ba945 2966 for_each_rx_queue(bp, i)
9f6c9258 2967 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2968
ad5afc89
AE
2969 bnx2x_free_fp_mem(bp);
2970 if (CNIC_LOADED(bp))
55c11941 2971 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2972
ad5afc89 2973 if (IS_PF(bp)) {
ad5afc89
AE
2974 if (CNIC_LOADED(bp))
2975 bnx2x_free_mem_cnic(bp);
2976 }
b4cddbd6
AE
2977 bnx2x_free_mem(bp);
2978
9f6c9258 2979 bp->state = BNX2X_STATE_CLOSED;
55c11941 2980 bp->cnic_loaded = false;
9f6c9258 2981
c9ee9206
VZ
2982 /* Check if there are pending parity attentions. If there are - set
2983 * RECOVERY_IN_PROGRESS.
2984 */
ad5afc89 2985 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2986 bnx2x_set_reset_in_progress(bp);
2987
2988 /* Set RESET_IS_GLOBAL if needed */
2989 if (global)
2990 bnx2x_set_reset_global(bp);
2991 }
2992
9f6c9258
DK
2993 /* The last driver must disable a "close the gate" if there is no
2994 * parity attention or "process kill" pending.
2995 */
ad5afc89
AE
2996 if (IS_PF(bp) &&
2997 !bnx2x_clear_pf_load(bp) &&
2998 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2999 bnx2x_disable_close_the_gate(bp);
3000
55c11941
MS
3001 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3002
9f6c9258
DK
3003 return 0;
3004}
f85582f8 3005
9f6c9258
DK
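/* Move the device between D0 and D3hot via the PCI PM control register.
 * Silently succeeds when there is no PM capability and refuses to power
 * down while other users of the device are still active.
 */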
3006int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3007{
3008 u16 pmcsr;
3009
adf5f6a1 3010 /* If there is no power capability, silently succeed */
29ed74c3 3011 if (!bp->pdev->pm_cap) {
51c1a580 3012 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3013 return 0;
3014 }
3015
29ed74c3 3016 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3017
3018 switch (state) {
3019 case PCI_D0:
29ed74c3 3020 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3021 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3022 PCI_PM_CTRL_PME_STATUS));
3023
3024 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3025 /* delay required during transition out of D3hot */
3026 msleep(20);
3027 break;
3028
3029 case PCI_D3hot:
3030 /* If there are other clients above, don't
3031 shut down the power */
3032 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3033 return 0;
3034 /* Don't shut down the power for emulation and FPGA */
3035 if (CHIP_REV_IS_SLOW(bp))
3036 return 0;
3037
3038 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3039 pmcsr |= 3;
3040
3041 if (bp->wol)
3042 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3043
29ed74c3 3044 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3045 pmcsr);
3046
3047 /* No more memory access after this point until
3048 * device is brought back to D0.
3049 */
3050 break;
3051
3052 default:
51c1a580 3053 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3054 return -EINVAL;
3055 }
3056 return 0;
3057}
3058
9f6c9258
DK
3059/*
3060 * net_device service functions
3061 */
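/* NAPI poll handler: service Tx completions on every CoS ring of this
 * fastpath, then Rx up to the remaining budget; the status block interrupt
 * is re-armed only once no Rx/Tx work is left.
 */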
d6214d7a 3062int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3063{
3064 int work_done = 0;
6383c0b3 3065 u8 cos;
9f6c9258
DK
3066 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3067 napi);
3068 struct bnx2x *bp = fp->bp;
3069
3070 while (1) {
3071#ifdef BNX2X_STOP_ON_ERROR
3072 if (unlikely(bp->panic)) {
3073 napi_complete(napi);
3074 return 0;
3075 }
3076#endif
8f20aa57
DK
3077 if (!bnx2x_fp_lock_napi(fp))
3078 return work_done;
9f6c9258 3079
6383c0b3 3080 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3081 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3082 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3083
9f6c9258
DK
3084 if (bnx2x_has_rx_work(fp)) {
3085 work_done += bnx2x_rx_int(fp, budget - work_done);
3086
3087 /* must not complete if we consumed full budget */
8f20aa57
DK
3088 if (work_done >= budget) {
3089 bnx2x_fp_unlock_napi(fp);
9f6c9258 3090 break;
8f20aa57 3091 }
9f6c9258
DK
3092 }
3093
3094 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3095 if (!bnx2x_fp_unlock_napi(fp) &&
3096 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3097
ec6ba945
VZ
3098 /* No need to update SB for FCoE L2 ring as long as
3099 * it's connected to the default SB and the SB
3100 * has been updated when NAPI was scheduled.
3101 */
3102 if (IS_FCOE_FP(fp)) {
3103 napi_complete(napi);
3104 break;
3105 }
9f6c9258 3106 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3107 /* bnx2x_has_rx_work() reads the status block,
3108 * thus we need to ensure that status block indices
3109 * have been actually read (bnx2x_update_fpsb_idx)
3110 * prior to this check (bnx2x_has_rx_work) so that
3111 * we won't write the "newer" value of the status block
3112 * to IGU (if there was a DMA right after
3113 * bnx2x_has_rx_work and if there is no rmb, the memory
3114 * reading (bnx2x_update_fpsb_idx) may be postponed
3115 * to right before bnx2x_ack_sb). In this case there
3116 * will never be another interrupt until there is
3117 * another update of the status block, while there
3118 * is still unhandled work.
3119 */
9f6c9258
DK
3120 rmb();
3121
3122 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3123 napi_complete(napi);
3124 /* Re-enable interrupts */
51c1a580 3125 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3126 "Update index to %d\n", fp->fp_hc_idx);
3127 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3128 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3129 IGU_INT_ENABLE, 1);
3130 break;
3131 }
3132 }
3133 }
3134
3135 return work_done;
3136}
3137
e0d1095a 3138#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3139/* must be called with local_bh_disable()d */
3140int bnx2x_low_latency_recv(struct napi_struct *napi)
3141{
3142 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3143 napi);
3144 struct bnx2x *bp = fp->bp;
3145 int found = 0;
3146
3147 if ((bp->state == BNX2X_STATE_CLOSED) ||
3148 (bp->state == BNX2X_STATE_ERROR) ||
3149 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3150 return LL_FLUSH_FAILED;
3151
3152 if (!bnx2x_fp_lock_poll(fp))
3153 return LL_FLUSH_BUSY;
3154
75b29459 3155 if (bnx2x_has_rx_work(fp))
8f20aa57 3156 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3157
3158 bnx2x_fp_unlock_poll(fp);
3159
3160 return found;
3161}
3162#endif
3163
9f6c9258
DK
3164/* we split the first BD into headers and data BDs
3165 * to ease the pain of our fellow microcode engineers;
3166 * we use one mapping for both BDs
9f6c9258 3167 */
91226790
DK
3168static u16 bnx2x_tx_split(struct bnx2x *bp,
3169 struct bnx2x_fp_txdata *txdata,
3170 struct sw_tx_bd *tx_buf,
3171 struct eth_tx_start_bd **tx_bd, u16 hlen,
3172 u16 bd_prod)
9f6c9258
DK
3173{
3174 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3175 struct eth_tx_bd *d_tx_bd;
3176 dma_addr_t mapping;
3177 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3178
3179 /* first fix first BD */
9f6c9258
DK
3180 h_tx_bd->nbytes = cpu_to_le16(hlen);
3181
91226790
DK
3182 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3183 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3184
3185 /* now get a new data BD
3186 * (after the pbd) and fill it */
3187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3188 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3189
3190 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3191 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3192
3193 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3194 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3195 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3196
3197 /* this marks the BD as one that has no individual mapping */
3198 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3199
3200 DP(NETIF_MSG_TX_QUEUED,
3201 "TSO split data size is %d (%x:%x)\n",
3202 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3203
3204 /* update tx_bd */
3205 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3206
3207 return bd_prod;
3208}
3209
86564c3f
YM
3210#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3211#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
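/* Adjust a checksum whose computation did not start exactly at the
 * transport header: subtract (fix > 0) or add (fix < 0) the partial sum
 * of the |fix| bytes adjacent to it and return the folded result
 * byte-swapped for the parsing BD.
 */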
91226790 3212static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3213{
86564c3f
YM
3214 __sum16 tsum = (__force __sum16) csum;
3215
9f6c9258 3216 if (fix > 0)
86564c3f
YM
3217 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3218 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3219
3220 else if (fix < 0)
86564c3f
YM
3221 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3222 csum_partial(t_header, -fix, 0)));
9f6c9258 3223
e2593fcd 3224 return bswab16(tsum);
9f6c9258
DK
3225}
3226
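/* Classify the outgoing skb into a bitmask of XMIT_* flags: plain vs.
 * checksummed, IPv4/IPv6, TCP/UDP, encapsulated (tunnel) offload on
 * non-E1x chips, and the GSO variant if the packet is GSO.
 */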
91226790 3227static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3228{
3229 u32 rc;
a848ade4
DK
3230 __u8 prot = 0;
3231 __be16 protocol;
9f6c9258
DK
3232
3233 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3234 return XMIT_PLAIN;
9f6c9258 3235
a848ade4
DK
3236 protocol = vlan_get_protocol(skb);
3237 if (protocol == htons(ETH_P_IPV6)) {
3238 rc = XMIT_CSUM_V6;
3239 prot = ipv6_hdr(skb)->nexthdr;
3240 } else {
3241 rc = XMIT_CSUM_V4;
3242 prot = ip_hdr(skb)->protocol;
3243 }
9f6c9258 3244
a848ade4
DK
3245 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3246 if (inner_ip_hdr(skb)->version == 6) {
3247 rc |= XMIT_CSUM_ENC_V6;
3248 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3249 rc |= XMIT_CSUM_TCP;
9f6c9258 3250 } else {
a848ade4
DK
3251 rc |= XMIT_CSUM_ENC_V4;
3252 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3253 rc |= XMIT_CSUM_TCP;
3254 }
3255 }
a848ade4
DK
3256 if (prot == IPPROTO_TCP)
3257 rc |= XMIT_CSUM_TCP;
9f6c9258 3258
a848ade4 3259 if (skb_is_gso_v6(skb)) {
e768fb29 3260 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
a848ade4
DK
3261 if (rc & XMIT_CSUM_ENC)
3262 rc |= XMIT_GSO_ENC_V6;
3263 } else if (skb_is_gso(skb)) {
e768fb29 3264 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
a848ade4
DK
3265 if (rc & XMIT_CSUM_ENC)
3266 rc |= XMIT_GSO_ENC_V4;
3267 }
9f6c9258
DK
3268
3269 return rc;
3270}
3271
3272#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3273/* check if packet requires linearization (packet is too fragmented)
3274 no need to check fragmentation if page size > 8K (there will be no
3275 violation of FW restrictions) */
3276static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3277 u32 xmit_type)
3278{
3279 int to_copy = 0;
3280 int hlen = 0;
3281 int first_bd_sz = 0;
3282
3283 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3284 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3285
3286 if (xmit_type & XMIT_GSO) {
3287 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3288 /* Check if LSO packet needs to be copied:
3289 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3290 int wnd_size = MAX_FETCH_BD - 3;
3291 /* Number of windows to check */
3292 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3293 int wnd_idx = 0;
3294 int frag_idx = 0;
3295 u32 wnd_sum = 0;
3296
3297 /* Headers length */
3298 hlen = (int)(skb_transport_header(skb) - skb->data) +
3299 tcp_hdrlen(skb);
3300
3301 /* Amount of data (w/o headers) on linear part of SKB */
3302 first_bd_sz = skb_headlen(skb) - hlen;
3303
3304 wnd_sum = first_bd_sz;
3305
3306 /* Calculate the first sum - it's special */
3307 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3308 wnd_sum +=
9e903e08 3309 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3310
3311 /* If there was data on linear skb data - check it */
3312 if (first_bd_sz > 0) {
3313 if (unlikely(wnd_sum < lso_mss)) {
3314 to_copy = 1;
3315 goto exit_lbl;
3316 }
3317
3318 wnd_sum -= first_bd_sz;
3319 }
3320
3321 /* Others are easier: run through the frag list and
3322 check all windows */
3323 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3324 wnd_sum +=
9e903e08 3325 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3326
3327 if (unlikely(wnd_sum < lso_mss)) {
3328 to_copy = 1;
3329 break;
3330 }
3331 wnd_sum -=
9e903e08 3332 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3333 }
3334 } else {
3335 /* in the non-LSO case a too fragmented packet should always
3336 be linearized */
3337 to_copy = 1;
3338 }
3339 }
3340
3341exit_lbl:
3342 if (unlikely(to_copy))
3343 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3344 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3345 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3346 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3347
3348 return to_copy;
3349}
3350#endif
3351
91226790
DK
3352static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3353 u32 xmit_type)
f2e0899f 3354{
a848ade4
DK
3355 struct ipv6hdr *ipv6;
3356
2297a2da
VZ
3357 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3358 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3359 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3360
3361 if (xmit_type & XMIT_GSO_ENC_V6)
3362 ipv6 = inner_ipv6_hdr(skb);
3363 else if (xmit_type & XMIT_GSO_V6)
3364 ipv6 = ipv6_hdr(skb);
3365 else
3366 ipv6 = NULL;
3367
3368 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3369 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3370}
3371
3372/**
e8920674 3373 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3374 *
e8920674
DK
3375 * @skb: packet skb
3376 * @pbd: parse BD
3377 * @xmit_type: xmit flags
f2e0899f 3378 */
91226790
DK
3379static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3380 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3381 struct eth_tx_start_bd *tx_start_bd,
91226790 3382 u32 xmit_type)
f2e0899f
DK
3383{
3384 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3385 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3386 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3387
3388 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3389 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3390 pbd->tcp_pseudo_csum =
86564c3f
YM
3391 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3392 ip_hdr(skb)->daddr,
3393 0, IPPROTO_TCP, 0));
f2e0899f 3394
057cf65e
YM
3395 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3396 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3397 } else {
f2e0899f 3398 pbd->tcp_pseudo_csum =
86564c3f
YM
3399 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3400 &ipv6_hdr(skb)->daddr,
3401 0, IPPROTO_TCP, 0));
057cf65e 3402 }
f2e0899f 3403
86564c3f
YM
3404 pbd->global_data |=
3405 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3406}
f85582f8 3407
a848ade4
DK
3408/**
3409 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3410 *
3411 * @bp: driver handle
3412 * @skb: packet skb
3413 * @parsing_data: data to be updated
3414 * @xmit_type: xmit flags
3415 *
3416 * 57712/578xx related, when skb has encapsulation
3417 */
3418static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3419 u32 *parsing_data, u32 xmit_type)
3420{
3421 *parsing_data |=
3422 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3423 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3424 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3425
3426 if (xmit_type & XMIT_CSUM_TCP) {
3427 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3428 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3429 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3430
3431 return skb_inner_transport_header(skb) +
3432 inner_tcp_hdrlen(skb) - skb->data;
3433 }
3434
3435 /* We support checksum offload for TCP and UDP only.
3436 * No need to pass the UDP header length - it's a constant.
3437 */
3438 return skb_inner_transport_header(skb) +
3439 sizeof(struct udphdr) - skb->data;
3440}
3441
f2e0899f 3442/**
e8920674 3443 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3444 *
e8920674
DK
3445 * @bp: driver handle
3446 * @skb: packet skb
3447 * @parsing_data: data to be updated
3448 * @xmit_type: xmit flags
f2e0899f 3449 *
91226790 3450 * 57712/578xx related
f2e0899f 3451 */
91226790
DK
3452static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3453 u32 *parsing_data, u32 xmit_type)
f2e0899f 3454{
e39aece7 3455 *parsing_data |=
2de67439 3456 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3457 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3458 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3459
e39aece7
VZ
3460 if (xmit_type & XMIT_CSUM_TCP) {
3461 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3462 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3463 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3464
e39aece7 3465 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3466 }
3467 /* We support checksum offload for TCP and UDP only.
3468 * No need to pass the UDP header length - it's a constant.
3469 */
3470 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3471}
3472
a848ade4 3473/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3474static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3475 struct eth_tx_start_bd *tx_start_bd,
3476 u32 xmit_type)
93ef5c02 3477{
93ef5c02
DK
3478 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3479
a848ade4 3480 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3481 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3482
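 /* only TCP and UDP checksum offload is supported, so a packet that is
 * not TCP is flagged as UDP here
 */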
3483 if (!(xmit_type & XMIT_CSUM_TCP))
3484 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3485}
3486
f2e0899f 3487/**
e8920674 3488 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3489 *
e8920674
DK
3490 * @bp: driver handle
3491 * @skb: packet skb
3492 * @pbd: parse BD to be updated
3493 * @xmit_type: xmit flags
f2e0899f 3494 */
91226790
DK
3495static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3496 struct eth_tx_parse_bd_e1x *pbd,
3497 u32 xmit_type)
f2e0899f 3498{
e39aece7 3499 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3500
3501 /* for now NS flag is not used in Linux */
3502 pbd->global_data =
86564c3f
YM
3503 cpu_to_le16(hlen |
3504 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3505 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3506
3507 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3508 skb_network_header(skb)) >> 1;
f2e0899f 3509
e39aece7
VZ
3510 hlen += pbd->ip_hlen_w;
3511
3512 /* We support checksum offload for TCP and UDP only */
3513 if (xmit_type & XMIT_CSUM_TCP)
3514 hlen += tcp_hdrlen(skb) / 2;
3515 else
3516 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3517
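 /* hlen has been accumulated in 16-bit words; it is stored in the PBD
 * as-is and then converted back to bytes for the value returned to the
 * caller
 */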
3518 pbd->total_hlen_w = cpu_to_le16(hlen);
3519 hlen = hlen*2;
3520
3521 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3522 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3523
3524 } else {
3525 s8 fix = SKB_CS_OFF(skb); /* signed! */
3526
3527 DP(NETIF_MSG_TX_QUEUED,
3528 "hlen %d fix %d csum before fix %x\n",
3529 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3530
3531 /* HW bug: fixup the CSUM */
3532 pbd->tcp_pseudo_csum =
3533 bnx2x_csum_fix(skb_transport_header(skb),
3534 SKB_CS(skb), fix);
3535
3536 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3537 pbd->tcp_pseudo_csum);
3538 }
3539
3540 return hlen;
3541}
f85582f8 3542
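/* For tunneled GSO on E2/E3 the second parsing BD describes the outer
 * headers: offsets and lengths are given in 16-bit words, and for IPv4 the
 * outer IP checksum is passed with the total-length and fragment fields
 * backed out of it (fw_ip_csum_wo_len_flags_frag). The pseudo checksum for
 * the inner headers goes into the E2 parsing BD's tunnel data.
 */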
a848ade4
DK
3543static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3544 struct eth_tx_parse_bd_e2 *pbd_e2,
3545 struct eth_tx_parse_2nd_bd *pbd2,
3546 u16 *global_data,
3547 u32 xmit_type)
3548{
e287a75c 3549 u16 hlen_w = 0;
a848ade4 3550 u8 outerip_off, outerip_len = 0;
e768fb29 3551
e287a75c
DK
3552 /* from outer IP to transport */
3553 hlen_w = (skb_inner_transport_header(skb) -
3554 skb_network_header(skb)) >> 1;
a848ade4
DK
3555
3556 /* transport len */
e768fb29 3557 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3558
e287a75c 3559 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3560
e768fb29
DK
3561 /* outer IP header info */
3562 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3563 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3564 u32 csum = (__force u32)(~iph->check) -
3565 (__force u32)iph->tot_len -
3566 (__force u32)iph->frag_off;
c957d09f 3567
a848ade4 3568 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3569 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3570 } else {
3571 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3572 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3573 }
3574
3575 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3576
3577 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3578
3579 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3580 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3581
3582 pbd_e2->data.tunnel_data.pseudo_csum =
3583 bswab16(~csum_tcpudp_magic(
3584 inner_ip_hdr(skb)->saddr,
3585 inner_ip_hdr(skb)->daddr,
3586 0, IPPROTO_TCP, 0));
3587
3588 outerip_len = ip_hdr(skb)->ihl << 1;
3589 } else {
3590 pbd_e2->data.tunnel_data.pseudo_csum =
3591 bswab16(~csum_ipv6_magic(
3592 &inner_ipv6_hdr(skb)->saddr,
3593 &inner_ipv6_hdr(skb)->daddr,
3594 0, IPPROTO_TCP, 0));
3595 }
3596
3597 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3598
3599 *global_data |=
3600 outerip_off |
3601 (!!(xmit_type & XMIT_CSUM_V6) <<
3602 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3603 (outerip_len <<
3604 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3605 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3606 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3607
3608 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3609 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3610 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3611 }
a848ade4
DK
3612}
3613
9f6c9258
DK
3614/* called with netif_tx_lock
3615 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3616 * netif_wake_queue()
3617 */
3618netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3619{
3620 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3621
9f6c9258 3622 struct netdev_queue *txq;
6383c0b3 3623 struct bnx2x_fp_txdata *txdata;
9f6c9258 3624 struct sw_tx_bd *tx_buf;
619c5cb6 3625 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3626 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3627 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3628 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3629 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3630 u32 pbd_e2_parsing_data = 0;
9f6c9258 3631 u16 pkt_prod, bd_prod;
65565884 3632 int nbd, txq_index;
9f6c9258
DK
3633 dma_addr_t mapping;
3634 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3635 int i;
3636 u8 hlen = 0;
3637 __le16 pkt_size = 0;
3638 struct ethhdr *eth;
3639 u8 mac_type = UNICAST_ADDRESS;
3640
3641#ifdef BNX2X_STOP_ON_ERROR
3642 if (unlikely(bp->panic))
3643 return NETDEV_TX_BUSY;
3644#endif
3645
6383c0b3
AE
3646 txq_index = skb_get_queue_mapping(skb);
3647 txq = netdev_get_tx_queue(dev, txq_index);
3648
55c11941 3649 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3650
65565884 3651 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3652
3653 /* enable this debug print to view the transmission queue being used
51c1a580 3654 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3655 txq_index, fp_index, txdata_index); */
9f6c9258 3656
16a5fd92 3657 /* enable this debug print to view the transmission details
51c1a580
MS
3658 DP(NETIF_MSG_TX_QUEUED,
3659 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3660 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3661
6383c0b3 3662 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3663 skb_shinfo(skb)->nr_frags +
3664 BDS_PER_TX_PKT +
3665 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3666 /* Handle special storage cases separately */
c96bdc0c
DK
3667 if (txdata->tx_ring_size == 0) {
3668 struct bnx2x_eth_q_stats *q_stats =
3669 bnx2x_fp_qstats(bp, txdata->parent_fp);
3670 q_stats->driver_filtered_tx_pkt++;
3671 dev_kfree_skb(skb);
3672 return NETDEV_TX_OK;
3673 }
2de67439
YM
3674 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3675 netif_tx_stop_queue(txq);
c96bdc0c 3676 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3677
9f6c9258
DK
3678 return NETDEV_TX_BUSY;
3679 }
3680
51c1a580 3681 DP(NETIF_MSG_TX_QUEUED,
04c46736 3682 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3683 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3684 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3685 skb->len);
9f6c9258
DK
3686
3687 eth = (struct ethhdr *)skb->data;
3688
3689 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3690 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3691 if (is_broadcast_ether_addr(eth->h_dest))
3692 mac_type = BROADCAST_ADDRESS;
3693 else
3694 mac_type = MULTICAST_ADDRESS;
3695 }
3696
91226790 3697#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3698 /* First, check if we need to linearize the skb (due to FW
3699 restrictions). No need to check fragmentation if page size > 8K
3700 (there will be no violation of FW restrictions) */
3701 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3702 /* Statistics of linearization */
3703 bp->lin_cnt++;
3704 if (skb_linearize(skb) != 0) {
51c1a580
MS
3705 DP(NETIF_MSG_TX_QUEUED,
3706 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3707 dev_kfree_skb_any(skb);
3708 return NETDEV_TX_OK;
3709 }
3710 }
3711#endif
619c5cb6
VZ
3712 /* Map skb linear data for DMA */
3713 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3714 skb_headlen(skb), DMA_TO_DEVICE);
3715 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3716 DP(NETIF_MSG_TX_QUEUED,
3717 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3718 dev_kfree_skb_any(skb);
3719 return NETDEV_TX_OK;
3720 }
9f6c9258
DK
3721 /*
3722 Please read carefully. First we use one BD which we mark as start,
3723 then we have a parsing info BD (used for TSO or xsum),
3724 and only then we have the rest of the TSO BDs.
3725 (don't forget to mark the last one as last,
3726 and to unmap only AFTER you write to the BD ...)
3727 And above all, all PBD sizes are in words - NOT DWORDS!
3728 */
3729
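 /* For illustration (not exhaustive): a typical packet uses a start BD
 * (linear data), a parsing BD (E1x or E2), a second parsing BD only for
 * tunneled packets, and one data BD per frag; first_bd->nbd is set to the
 * final count once all frags have been mapped.
 */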
619c5cb6
VZ
3730 /* get current pkt produced now - advance it just before sending packet
3731 * since mapping of pages may fail and cause packet to be dropped
3732 */
6383c0b3
AE
3733 pkt_prod = txdata->tx_pkt_prod;
3734 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3735
619c5cb6
VZ
3736 /* get a tx_buf and first BD
3737 * tx_start_bd may be changed during SPLIT,
3738 * but first_bd will always stay first
3739 */
6383c0b3
AE
3740 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3741 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3742 first_bd = tx_start_bd;
9f6c9258
DK
3743
3744 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3745
91226790
DK
3746 /* header nbd: indirectly zero other flags! */
3747 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3748
3749 /* remember the first BD of the packet */
6383c0b3 3750 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3751 tx_buf->skb = skb;
3752 tx_buf->flags = 0;
3753
3754 DP(NETIF_MSG_TX_QUEUED,
3755 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3756 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3757
eab6d18d 3758 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3759 tx_start_bd->vlan_or_ethertype =
3760 cpu_to_le16(vlan_tx_tag_get(skb));
3761 tx_start_bd->bd_flags.as_bitfield |=
3762 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3763 } else {
3764 /* when transmitting from a VF, the start BD must hold the
3765 * ethertype for the FW to enforce it
3766 */
91226790 3767 if (IS_VF(bp))
dc1ba591
AE
3768 tx_start_bd->vlan_or_ethertype =
3769 cpu_to_le16(ntohs(eth->h_proto));
91226790 3770 else
dc1ba591
AE
3771 /* used by FW for packet accounting */
3772 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3773 }
9f6c9258 3774
91226790
DK
3775 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3776
9f6c9258
DK
3777 /* turn on parsing and get a BD */
3778 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3779
93ef5c02
DK
3780 if (xmit_type & XMIT_CSUM)
3781 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3782
619c5cb6 3783 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3784 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3785 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3786
3787 if (xmit_type & XMIT_CSUM_ENC) {
3788 u16 global_data = 0;
3789
3790 /* Set PBD in enc checksum offload case */
3791 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3792 &pbd_e2_parsing_data,
3793 xmit_type);
3794
3795 /* turn on 2nd parsing and get a BD */
3796 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3797
3798 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3799
3800 memset(pbd2, 0, sizeof(*pbd2));
3801
3802 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3803 (skb_inner_network_header(skb) -
3804 skb->data) >> 1;
3805
3806 if (xmit_type & XMIT_GSO_ENC)
3807 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3808 &global_data,
3809 xmit_type);
3810
3811 pbd2->global_data = cpu_to_le16(global_data);
3812
3813 /* add an additional parsing BD indication to the start BD */
3814 SET_FLAG(tx_start_bd->general_data,
3815 ETH_TX_START_BD_PARSE_NBDS, 1);
3816 /* set encapsulation flag in start BD */
3817 SET_FLAG(tx_start_bd->general_data,
3818 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3819 nbd++;
3820 } else if (xmit_type & XMIT_CSUM) {
91226790 3821 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3822 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3823 &pbd_e2_parsing_data,
3824 xmit_type);
a848ade4 3825 }
dc1ba591 3826
91226790
DK
3827 /* Add the MACs to the parsing BD if this is a VF */
3828 if (IS_VF(bp)) {
3829 /* override GRE parameters in BD */
3830 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3831 &pbd_e2->data.mac_addr.src_mid,
3832 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3833 eth->h_source);
91226790
DK
3834
3835 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3836 &pbd_e2->data.mac_addr.dst_mid,
3837 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3838 eth->h_dest);
3839 }
96bed4b9
YM
3840
3841 SET_FLAG(pbd_e2_parsing_data,
3842 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3843 } else {
96bed4b9 3844 u16 global_data = 0;
6383c0b3 3845 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3846 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3847 /* Set PBD in checksum offload case */
3848 if (xmit_type & XMIT_CSUM)
3849 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3850
96bed4b9
YM
3851 SET_FLAG(global_data,
3852 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3853 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3854 }
3855
f85582f8 3856 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3857 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3858 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3859 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3860 pkt_size = tx_start_bd->nbytes;
3861
51c1a580 3862 DP(NETIF_MSG_TX_QUEUED,
91226790 3863 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3864 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3865 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3866 tx_start_bd->bd_flags.as_bitfield,
3867 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3868
3869 if (xmit_type & XMIT_GSO) {
3870
3871 DP(NETIF_MSG_TX_QUEUED,
3872 "TSO packet len %d hlen %d total len %d tso size %d\n",
3873 skb->len, hlen, skb_headlen(skb),
3874 skb_shinfo(skb)->gso_size);
3875
3876 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3877
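 /* if the linear part extends past the headers (hlen, in bytes), split
 * the start BD so that headers and payload land in separate BDs and
 * account for the extra BD in nbd
 */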
91226790
DK
3878 if (unlikely(skb_headlen(skb) > hlen)) {
3879 nbd++;
6383c0b3
AE
3880 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3881 &tx_start_bd, hlen,
91226790
DK
3882 bd_prod);
3883 }
619c5cb6 3884 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3885 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3886 xmit_type);
f2e0899f 3887 else
44dbc78e 3888 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3889 }
2297a2da
VZ
3890
3891 /* Set the PBD's parsing_data field if not zero
3892 * (for the chips newer than 57711).
3893 */
3894 if (pbd_e2_parsing_data)
3895 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3896
9f6c9258
DK
3897 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3898
f85582f8 3899 /* Handle fragmented skb */
9f6c9258
DK
3900 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3901 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3902
9e903e08
ED
3903 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3904 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3905 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3906 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3907
51c1a580
MS
3908 DP(NETIF_MSG_TX_QUEUED,
3909 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3910
3911 /* we need unmap all buffers already mapped
3912 * for this SKB;
3913 * first_bd->nbd need to be properly updated
3914 * before call to bnx2x_free_tx_pkt
3915 */
3916 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3917 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3918 TX_BD(txdata->tx_pkt_prod),
3919 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3920 return NETDEV_TX_OK;
3921 }
3922
9f6c9258 3923 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3924 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3925 if (total_pkt_bd == NULL)
6383c0b3 3926 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3927
9f6c9258
DK
3928 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3929 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3930 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3931 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3932 nbd++;
9f6c9258
DK
3933
3934 DP(NETIF_MSG_TX_QUEUED,
3935 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3936 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3937 le16_to_cpu(tx_data_bd->nbytes));
3938 }
3939
3940 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3941
619c5cb6
VZ
3942 /* update with actual num BDs */
3943 first_bd->nbd = cpu_to_le16(nbd);
3944
9f6c9258
DK
3945 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3946
3947 /* now send a tx doorbell, counting the next BD
3948 * if the packet contains or ends with it
3949 */
3950 if (TX_BD_POFF(bd_prod) < nbd)
3951 nbd++;
3952
619c5cb6
VZ
3953 /* total_pkt_bytes should be set on the first data BD if
3954 * it's not an LSO packet and there is more than one
3955 * data BD. In this case pkt_size is limited by an MTU value.
3956 * However we prefer to set it for an LSO packet as well (while we
3957 * don't have to) in order to save some CPU cycles in the non-LSO
3958 * case, where they matter more.
3959 */
9f6c9258
DK
3960 if (total_pkt_bd != NULL)
3961 total_pkt_bd->total_pkt_bytes = pkt_size;
3962
523224a3 3963 if (pbd_e1x)
9f6c9258 3964 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3965 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3966 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3967 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3968 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3969 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3970 if (pbd_e2)
3971 DP(NETIF_MSG_TX_QUEUED,
3972 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3973 pbd_e2,
3974 pbd_e2->data.mac_addr.dst_hi,
3975 pbd_e2->data.mac_addr.dst_mid,
3976 pbd_e2->data.mac_addr.dst_lo,
3977 pbd_e2->data.mac_addr.src_hi,
3978 pbd_e2->data.mac_addr.src_mid,
3979 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3980 pbd_e2->parsing_data);
9f6c9258
DK
3981 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3982
2df1a70a
TH
3983 netdev_tx_sent_queue(txq, skb->len);
3984
8373c57d
WB
3985 skb_tx_timestamp(skb);
3986
6383c0b3 3987 txdata->tx_pkt_prod++;
9f6c9258
DK
3988 /*
3989 * Make sure that the BD data is updated before updating the producer
3990 * since FW might read the BD right after the producer is updated.
3991 * This is only applicable for weak-ordered memory model archs such
3992 * as IA-64. The following barrier is also mandatory since the FW
3993 * assumes packets must have BDs.
3994 */
3995 wmb();
3996
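 /* advance the doorbell producer by the number of BDs just queued;
 * tx_db.raw aliases the same producer fields, so a single write of it
 * pushes the updated producer to the chip
 */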
6383c0b3 3997 txdata->tx_db.data.prod += nbd;
9f6c9258 3998 barrier();
f85582f8 3999
6383c0b3 4000 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4001
4002 mmiowb();
4003
6383c0b3 4004 txdata->tx_bd_prod += nbd;
9f6c9258 4005
7df2dc6b 4006 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4007 netif_tx_stop_queue(txq);
4008
4009 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4010 * ordering of set_bit() in netif_tx_stop_queue() and read of
4011 * fp->bd_tx_cons */
4012 smp_mb();
4013
15192a8c 4014 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4015 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4016 netif_tx_wake_queue(txq);
4017 }
6383c0b3 4018 txdata->tx_pkt++;
9f6c9258
DK
4019
4020 return NETDEV_TX_OK;
4021}
f85582f8 4022
6383c0b3
AE
4023/**
4024 * bnx2x_setup_tc - routine to configure net_device for multi tc
4025 *
4026 * @netdev: net device to configure
4027 * @tc: number of traffic classes to enable
4028 *
4029 * callback connected to the ndo_setup_tc function pointer
4030 */
4031int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4032{
4033 int cos, prio, count, offset;
4034 struct bnx2x *bp = netdev_priv(dev);
4035
4036 /* setup tc must be called under rtnl lock */
4037 ASSERT_RTNL();
4038
16a5fd92 4039 /* no traffic classes requested. Aborting */
6383c0b3
AE
4040 if (!num_tc) {
4041 netdev_reset_tc(dev);
4042 return 0;
4043 }
4044
4045 /* requested to support too many traffic classes */
4046 if (num_tc > bp->max_cos) {
6bf07b8e 4047 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4048 num_tc, bp->max_cos);
6383c0b3
AE
4049 return -EINVAL;
4050 }
4051
4052 /* declare amount of supported traffic classes */
4053 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4054 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4055 return -EINVAL;
4056 }
4057
4058 /* configure priority to traffic class mapping */
4059 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4060 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4061 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4062 "mapping priority %d to tc %d\n",
6383c0b3
AE
4063 prio, bp->prio_to_cos[prio]);
4064 }
4065
16a5fd92 4066 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4067 This can be used for ETS or PFC, and saves the effort of setting
4068 up a multi-class queue disc or negotiating DCBX with a switch
4069 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4070 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4071 for (prio = 1; prio < 16; prio++) {
4072 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4073 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4074 } */
4075
4076 /* configure traffic class to transmission queue mapping */
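 /* e.g. (assuming 8 ETH queues, no CNIC and max_cos == 3): tc0 maps to
 * queues 0-7, tc1 to queues 8-15 and tc2 to queues 16-23
 */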
4077 for (cos = 0; cos < bp->max_cos; cos++) {
4078 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4079 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4080 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4081 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4082 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4083 cos, offset, count);
4084 }
4085
4086 return 0;
4087}
4088
9f6c9258
DK
4089/* called with rtnl_lock */
4090int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4091{
4092 struct sockaddr *addr = p;
4093 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4094 int rc = 0;
9f6c9258 4095
51c1a580
MS
4096 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4097 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4098 return -EINVAL;
51c1a580 4099 }
614c76df 4100
a3348722
BW
4101 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4102 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4103 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4104 return -EINVAL;
51c1a580 4105 }
9f6c9258 4106
619c5cb6
VZ
4107 if (netif_running(dev)) {
4108 rc = bnx2x_set_eth_mac(bp, false);
4109 if (rc)
4110 return rc;
4111 }
4112
9f6c9258 4113 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4114
523224a3 4115 if (netif_running(dev))
619c5cb6 4116 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4117
619c5cb6 4118 return rc;
9f6c9258
DK
4119}
4120
b3b83c3f
DK
4121static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4122{
4123 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4124 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4125 u8 cos;
b3b83c3f
DK
4126
4127 /* Common */
55c11941 4128
b3b83c3f
DK
4129 if (IS_FCOE_IDX(fp_index)) {
4130 memset(sb, 0, sizeof(union host_hc_status_block));
4131 fp->status_blk_mapping = 0;
b3b83c3f 4132 } else {
b3b83c3f 4133 /* status blocks */
619c5cb6 4134 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4135 BNX2X_PCI_FREE(sb->e2_sb,
4136 bnx2x_fp(bp, fp_index,
4137 status_blk_mapping),
4138 sizeof(struct host_hc_status_block_e2));
4139 else
4140 BNX2X_PCI_FREE(sb->e1x_sb,
4141 bnx2x_fp(bp, fp_index,
4142 status_blk_mapping),
4143 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4144 }
55c11941 4145
b3b83c3f
DK
4146 /* Rx */
4147 if (!skip_rx_queue(bp, fp_index)) {
4148 bnx2x_free_rx_bds(fp);
4149
4150 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4151 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4152 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4153 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4154 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4155
4156 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4157 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4158 sizeof(struct eth_fast_path_rx_cqe) *
4159 NUM_RCQ_BD);
4160
4161 /* SGE ring */
4162 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4163 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4164 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4165 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4166 }
4167
4168 /* Tx */
4169 if (!skip_tx_queue(bp, fp_index)) {
4170 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4171 for_each_cos_in_tx_queue(fp, cos) {
65565884 4172 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4173
51c1a580 4174 DP(NETIF_MSG_IFDOWN,
94f05b0f 4175 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4176 fp_index, cos, txdata->cid);
4177
4178 BNX2X_FREE(txdata->tx_buf_ring);
4179 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4180 txdata->tx_desc_mapping,
4181 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4182 }
b3b83c3f
DK
4183 }
4184 /* end of fastpath */
4185}
4186
55c11941
MS
4187void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4188{
4189 int i;
4190 for_each_cnic_queue(bp, i)
4191 bnx2x_free_fp_mem_at(bp, i);
4192}
4193
b3b83c3f
DK
4194void bnx2x_free_fp_mem(struct bnx2x *bp)
4195{
4196 int i;
55c11941 4197 for_each_eth_queue(bp, i)
b3b83c3f
DK
4198 bnx2x_free_fp_mem_at(bp, i);
4199}
4200
1191cb83 4201static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4202{
4203 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4204 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4205 bnx2x_fp(bp, index, sb_index_values) =
4206 (__le16 *)status_blk.e2_sb->sb.index_values;
4207 bnx2x_fp(bp, index, sb_running_index) =
4208 (__le16 *)status_blk.e2_sb->sb.running_index;
4209 } else {
4210 bnx2x_fp(bp, index, sb_index_values) =
4211 (__le16 *)status_blk.e1x_sb->sb.index_values;
4212 bnx2x_fp(bp, index, sb_running_index) =
4213 (__le16 *)status_blk.e1x_sb->sb.running_index;
4214 }
4215}
4216
1191cb83
ED
4217/* Returns the number of actually allocated BDs */
4218static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4219 int rx_ring_size)
4220{
4221 struct bnx2x *bp = fp->bp;
4222 u16 ring_prod, cqe_ring_prod;
4223 int i, failure_cnt = 0;
4224
4225 fp->rx_comp_cons = 0;
4226 cqe_ring_prod = ring_prod = 0;
4227
4228 /* This routine is called only during fp init, so
4229 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4230 */
4231 for (i = 0; i < rx_ring_size; i++) {
996dedba 4232 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4233 failure_cnt++;
4234 continue;
4235 }
4236 ring_prod = NEXT_RX_IDX(ring_prod);
4237 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4238 WARN_ON(ring_prod <= (i - failure_cnt));
4239 }
4240
4241 if (failure_cnt)
4242 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4243 i - failure_cnt, fp->index);
4244
4245 fp->rx_bd_prod = ring_prod;
4246 /* Limit the CQE producer by the CQE ring size */
4247 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4248 cqe_ring_prod);
4249 fp->rx_pkt = fp->rx_calls = 0;
4250
15192a8c 4251 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4252
4253 return i - failure_cnt;
4254}
4255
4256static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4257{
4258 int i;
4259
4260 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4261 struct eth_rx_cqe_next_page *nextpg;
4262
4263 nextpg = (struct eth_rx_cqe_next_page *)
4264 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4265 nextpg->addr_hi =
4266 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4267 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4268 nextpg->addr_lo =
4269 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4270 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4271 }
4272}
4273
b3b83c3f
DK
4274static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4275{
4276 union host_hc_status_block *sb;
4277 struct bnx2x_fastpath *fp = &bp->fp[index];
4278 int ring_size = 0;
6383c0b3 4279 u8 cos;
c2188952 4280 int rx_ring_size = 0;
b3b83c3f 4281
a3348722
BW
4282 if (!bp->rx_ring_size &&
4283 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4284 rx_ring_size = MIN_RX_SIZE_NONTPA;
4285 bp->rx_ring_size = rx_ring_size;
55c11941 4286 } else if (!bp->rx_ring_size) {
c2188952
VZ
4287 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4288
065f8b92
YM
4289 if (CHIP_IS_E3(bp)) {
4290 u32 cfg = SHMEM_RD(bp,
4291 dev_info.port_hw_config[BP_PORT(bp)].
4292 default_cfg);
4293
4294 /* Decrease ring size for 1G functions */
4295 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4296 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4297 rx_ring_size /= 10;
4298 }
d760fc37 4299
c2188952
VZ
4300 /* allocate at least number of buffers required by FW */
4301 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4302 MIN_RX_SIZE_TPA, rx_ring_size);
4303
4304 bp->rx_ring_size = rx_ring_size;
614c76df 4305 } else /* if rx_ring_size specified - use it */
c2188952 4306 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4307
04c46736
YM
4308 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4309
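 /* to summarize the selection above: an rx_ring_size provided via ethtool
 * is used as-is; storage-only (iSCSI/FCoE AFEX) functions get the non-TPA
 * minimum; otherwise MAX_RX_AVAIL is split across the RX queues, reduced
 * tenfold on 1G E3 SerDes ports and clamped to the FW minimum
 */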
b3b83c3f
DK
4310 /* Common */
4311 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4312
b3b83c3f 4313 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4314 /* status blocks */
619c5cb6 4315 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4316 BNX2X_PCI_ALLOC(sb->e2_sb,
4317 &bnx2x_fp(bp, index, status_blk_mapping),
4318 sizeof(struct host_hc_status_block_e2));
4319 else
4320 BNX2X_PCI_ALLOC(sb->e1x_sb,
4321 &bnx2x_fp(bp, index, status_blk_mapping),
4322 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4323 }
8eef2af1
DK
4324
4325 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4326 * set shortcuts for it.
4327 */
4328 if (!IS_FCOE_IDX(index))
4329 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4330
4331 /* Tx */
4332 if (!skip_tx_queue(bp, index)) {
4333 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4334 for_each_cos_in_tx_queue(fp, cos) {
65565884 4335 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4336
51c1a580
MS
4337 DP(NETIF_MSG_IFUP,
4338 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4339 index, cos);
4340
4341 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4342 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4343 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4344 &txdata->tx_desc_mapping,
b3b83c3f 4345 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4346 }
b3b83c3f
DK
4347 }
4348
4349 /* Rx */
4350 if (!skip_rx_queue(bp, index)) {
4351 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4352 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4353 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4354 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4355 &bnx2x_fp(bp, index, rx_desc_mapping),
4356 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4357
75b29459
DK
4358 /* Seed all CQEs by 1s */
4359 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4360 &bnx2x_fp(bp, index, rx_comp_mapping),
4361 sizeof(struct eth_fast_path_rx_cqe) *
4362 NUM_RCQ_BD);
b3b83c3f
DK
4363
4364 /* SGE ring */
4365 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4366 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4367 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4368 &bnx2x_fp(bp, index, rx_sge_mapping),
4369 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4370 /* RX BD ring */
4371 bnx2x_set_next_page_rx_bd(fp);
4372
4373 /* CQ ring */
4374 bnx2x_set_next_page_rx_cq(fp);
4375
4376 /* BDs */
4377 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4378 if (ring_size < rx_ring_size)
4379 goto alloc_mem_err;
4380 }
4381
4382 return 0;
4383
4384/* handles low memory cases */
4385alloc_mem_err:
4386 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4387 index, ring_size);
4388 /* The FW will drop all packets if the queue is not big enough,
4389 * so in these cases we disable the queue.
6383c0b3 4390 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4391 */
4392 if (ring_size < (fp->disable_tpa ?
eb722d7a 4393 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4394 /* release memory allocated for this queue */
4395 bnx2x_free_fp_mem_at(bp, index);
4396 return -ENOMEM;
4397 }
4398 return 0;
4399}
4400
55c11941
MS
4401int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4402{
4403 if (!NO_FCOE(bp))
4404 /* FCoE */
4405 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4406 /* we fail the load process instead of marking
4407 * NO_FCOE_FLAG
4408 */
4409 return -ENOMEM;
4410
4411 return 0;
4412}
4413
b3b83c3f
DK
4414int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4415{
4416 int i;
4417
55c11941
MS
4418 /* 1. Allocate FP for leading - fatal if error
4419 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4420 */
4421
4422 /* leading */
4423 if (bnx2x_alloc_fp_mem_at(bp, 0))
4424 return -ENOMEM;
6383c0b3 4425
b3b83c3f
DK
4426 /* RSS */
4427 for_each_nondefault_eth_queue(bp, i)
4428 if (bnx2x_alloc_fp_mem_at(bp, i))
4429 break;
4430
4431 /* handle memory failures */
4432 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4433 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4434
4435 WARN_ON(delta < 0);
4864a16a 4436 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4437 if (CNIC_SUPPORT(bp))
4438 /* move non-eth FPs next to the last eth FP;
4439 * must be done in this order:
4440 * FCOE_IDX < FWD_IDX < OOO_IDX
4441 */
b3b83c3f 4442
55c11941
MS
4443 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4444 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4445 bp->num_ethernet_queues -= delta;
4446 bp->num_queues = bp->num_ethernet_queues +
4447 bp->num_cnic_queues;
b3b83c3f
DK
4448 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4449 bp->num_queues + delta, bp->num_queues);
4450 }
4451
4452 return 0;
4453}
d6214d7a 4454
523224a3
DK
4455void bnx2x_free_mem_bp(struct bnx2x *bp)
4456{
c3146eb6
DK
4457 int i;
4458
4459 for (i = 0; i < bp->fp_array_size; i++)
4460 kfree(bp->fp[i].tpa_info);
523224a3 4461 kfree(bp->fp);
15192a8c
BW
4462 kfree(bp->sp_objs);
4463 kfree(bp->fp_stats);
65565884 4464 kfree(bp->bnx2x_txq);
523224a3
DK
4465 kfree(bp->msix_table);
4466 kfree(bp->ilt);
4467}
4468
0329aba1 4469int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4470{
4471 struct bnx2x_fastpath *fp;
4472 struct msix_entry *tbl;
4473 struct bnx2x_ilt *ilt;
6383c0b3 4474 int msix_table_size = 0;
55c11941 4475 int fp_array_size, txq_array_size;
15192a8c 4476 int i;
6383c0b3
AE
4477
4478 /*
4479 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4480 * path IGU SBs plus default SB (for PF only).
6383c0b3 4481 */
1ab4434c
AE
4482 msix_table_size = bp->igu_sb_cnt;
4483 if (IS_PF(bp))
4484 msix_table_size++;
4485 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4486
6383c0b3 4487 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4488 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4489 bp->fp_array_size = fp_array_size;
4490 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4491
c3146eb6 4492 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4493 if (!fp)
4494 goto alloc_err;
c3146eb6 4495 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4496 fp[i].tpa_info =
4497 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4498 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4499 if (!(fp[i].tpa_info))
4500 goto alloc_err;
4501 }
4502
523224a3
DK
4503 bp->fp = fp;
4504
15192a8c 4505 /* allocate sp objs */
c3146eb6 4506 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4507 GFP_KERNEL);
4508 if (!bp->sp_objs)
4509 goto alloc_err;
4510
4511 /* allocate fp_stats */
c3146eb6 4512 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4513 GFP_KERNEL);
4514 if (!bp->fp_stats)
4515 goto alloc_err;
4516
65565884 4517 /* Allocate memory for the transmission queues array */
55c11941
MS
4518 txq_array_size =
4519 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4520 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4521
4522 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4523 GFP_KERNEL);
65565884
MS
4524 if (!bp->bnx2x_txq)
4525 goto alloc_err;
4526
523224a3 4527 /* msix table */
01e23742 4528 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4529 if (!tbl)
4530 goto alloc_err;
4531 bp->msix_table = tbl;
4532
4533 /* ilt */
4534 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4535 if (!ilt)
4536 goto alloc_err;
4537 bp->ilt = ilt;
4538
4539 return 0;
4540alloc_err:
4541 bnx2x_free_mem_bp(bp);
4542 return -ENOMEM;
523224a3
DK
4543}
4544
a9fccec7 4545int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4546{
4547 struct bnx2x *bp = netdev_priv(dev);
4548
4549 if (unlikely(!netif_running(dev)))
4550 return 0;
4551
5d07d868 4552 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4553 return bnx2x_nic_load(bp, LOAD_NORMAL);
4554}
4555
1ac9e428
YR
4556int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4557{
4558 u32 sel_phy_idx = 0;
4559 if (bp->link_params.num_phys <= 1)
4560 return INT_PHY;
4561
4562 if (bp->link_vars.link_up) {
4563 sel_phy_idx = EXT_PHY1;
4564 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4565 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4566 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4567 sel_phy_idx = EXT_PHY2;
4568 } else {
4569
4570 switch (bnx2x_phy_selection(&bp->link_params)) {
4571 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4572 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4573 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4574 sel_phy_idx = EXT_PHY1;
4575 break;
4576 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4577 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4578 sel_phy_idx = EXT_PHY2;
4579 break;
4580 }
4581 }
4582
4583 return sel_phy_idx;
1ac9e428
YR
4584}
4585int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4586{
4587 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4588 /*
2de67439 4589 * The selected (active) PHY is always the one after swapping (in case PHY
1ac9e428
YR
4590 * swapping is enabled). So when swapping is enabled, we need to reverse
4591 * the configuration
4592 */
4593
4594 if (bp->link_params.multi_phy_config &
4595 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4596 if (sel_phy_idx == EXT_PHY1)
4597 sel_phy_idx = EXT_PHY2;
4598 else if (sel_phy_idx == EXT_PHY2)
4599 sel_phy_idx = EXT_PHY1;
4600 }
4601 return LINK_CONFIG_IDX(sel_phy_idx);
4602}
4603
55c11941 4604#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4605int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4606{
4607 struct bnx2x *bp = netdev_priv(dev);
4608 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4609
4610 switch (type) {
4611 case NETDEV_FCOE_WWNN:
4612 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4613 cp->fcoe_wwn_node_name_lo);
4614 break;
4615 case NETDEV_FCOE_WWPN:
4616 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4617 cp->fcoe_wwn_port_name_lo);
4618 break;
4619 default:
51c1a580 4620 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4621 return -EINVAL;
4622 }
4623
4624 return 0;
4625}
4626#endif
4627
9f6c9258
DK
4628/* called with rtnl_lock */
4629int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4630{
4631 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4632
4633 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4634 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4635 return -EAGAIN;
4636 }
4637
4638 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4639 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4640 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4641 return -EINVAL;
51c1a580 4642 }
9f6c9258
DK
4643
4644 /* This does not race with packet allocation
4645 * because the actual alloc size is
4646 * only updated as part of load
4647 */
4648 dev->mtu = new_mtu;
4649
66371c44
MM
4650 return bnx2x_reload_if_running(dev);
4651}
4652
c8f44aff 4653netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4654 netdev_features_t features)
66371c44
MM
4655{
4656 struct bnx2x *bp = netdev_priv(dev);
4657
4658 /* TPA requires Rx CSUM offloading */
621b4d66 4659 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4660 features &= ~NETIF_F_LRO;
621b4d66
DK
4661 features &= ~NETIF_F_GRO;
4662 }
66371c44
MM
4663
4664 return features;
4665}
4666
c8f44aff 4667int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4668{
4669 struct bnx2x *bp = netdev_priv(dev);
4670 u32 flags = bp->flags;
8802f579 4671 u32 changes;
538dd2e3 4672 bool bnx2x_reload = false;
66371c44
MM
4673
4674 if (features & NETIF_F_LRO)
4675 flags |= TPA_ENABLE_FLAG;
4676 else
4677 flags &= ~TPA_ENABLE_FLAG;
4678
621b4d66
DK
4679 if (features & NETIF_F_GRO)
4680 flags |= GRO_ENABLE_FLAG;
4681 else
4682 flags &= ~GRO_ENABLE_FLAG;
4683
538dd2e3
MB
4684 if (features & NETIF_F_LOOPBACK) {
4685 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4686 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4687 bnx2x_reload = true;
4688 }
4689 } else {
4690 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4691 bp->link_params.loopback_mode = LOOPBACK_NONE;
4692 bnx2x_reload = true;
4693 }
4694 }
4695
8802f579
ED
4696 changes = flags ^ bp->flags;
4697
16a5fd92 4698 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4699 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4700 changes &= ~GRO_ENABLE_FLAG;
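 /* TPA (LRO) takes precedence over GRO aggregation in this driver, so a
 * GRO toggle is presumed to have no datapath effect until LRO is turned
 * off, hence no reload
 */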
4701
4702 if (changes)
538dd2e3 4703 bnx2x_reload = true;
8802f579
ED
4704
4705 bp->flags = flags;
66371c44 4706
538dd2e3 4707 if (bnx2x_reload) {
66371c44
MM
4708 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4709 return bnx2x_reload_if_running(dev);
4710 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4711 }
4712
66371c44 4713 return 0;
9f6c9258
DK
4714}
4715
4716void bnx2x_tx_timeout(struct net_device *dev)
4717{
4718 struct bnx2x *bp = netdev_priv(dev);
4719
4720#ifdef BNX2X_STOP_ON_ERROR
4721 if (!bp->panic)
4722 bnx2x_panic();
4723#endif
7be08a72
AE
4724
4725 smp_mb__before_clear_bit();
4726 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4727 smp_mb__after_clear_bit();
4728
9f6c9258 4729 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4730 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4731}
4732
9f6c9258
DK
4733int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4734{
4735 struct net_device *dev = pci_get_drvdata(pdev);
4736 struct bnx2x *bp;
4737
4738 if (!dev) {
4739 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4740 return -ENODEV;
4741 }
4742 bp = netdev_priv(dev);
4743
4744 rtnl_lock();
4745
4746 pci_save_state(pdev);
4747
4748 if (!netif_running(dev)) {
4749 rtnl_unlock();
4750 return 0;
4751 }
4752
4753 netif_device_detach(dev);
4754
5d07d868 4755 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4756
4757 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4758
4759 rtnl_unlock();
4760
4761 return 0;
4762}
4763
4764int bnx2x_resume(struct pci_dev *pdev)
4765{
4766 struct net_device *dev = pci_get_drvdata(pdev);
4767 struct bnx2x *bp;
4768 int rc;
4769
4770 if (!dev) {
4771 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4772 return -ENODEV;
4773 }
4774 bp = netdev_priv(dev);
4775
4776 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4777 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4778 return -EAGAIN;
4779 }
4780
4781 rtnl_lock();
4782
4783 pci_restore_state(pdev);
4784
4785 if (!netif_running(dev)) {
4786 rtnl_unlock();
4787 return 0;
4788 }
4789
4790 bnx2x_set_power_state(bp, PCI_D0);
4791 netif_device_attach(dev);
4792
4793 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4794
4795 rtnl_unlock();
4796
4797 return rc;
4798}
619c5cb6 4799
619c5cb6
VZ
4800void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4801 u32 cid)
4802{
b9871bcf
AE
4803 if (!cxt) {
4804 BNX2X_ERR("bad context pointer %p\n", cxt);
4805 return;
4806 }
4807
619c5cb6
VZ
4808 /* ustorm cxt validation */
4809 cxt->ustorm_ag_context.cdu_usage =
4810 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4811 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4812 /* xcontext validation */
4813 cxt->xstorm_ag_context.cdu_reserved =
4814 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4815 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4816}
4817
1191cb83
ED
4818static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4819 u8 fw_sb_id, u8 sb_index,
4820 u8 ticks)
619c5cb6 4821{
619c5cb6
VZ
4822 u32 addr = BAR_CSTRORM_INTMEM +
4823 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4824 REG_WR8(bp, addr, ticks);
51c1a580
MS
4825 DP(NETIF_MSG_IFUP,
4826 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4827 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4828}
4829
1191cb83
ED
4830static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4831 u16 fw_sb_id, u8 sb_index,
4832 u8 disable)
619c5cb6
VZ
4833{
4834 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4835 u32 addr = BAR_CSTRORM_INTMEM +
4836 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4837 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4838 /* clear and set */
4839 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4840 flags |= enable_flag;
0c14e5ce 4841 REG_WR8(bp, addr, flags);
51c1a580
MS
4842 DP(NETIF_MSG_IFUP,
4843 "port %x fw_sb_id %d sb_index %d disable %d\n",
4844 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4845}
4846
4847void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4848 u8 sb_index, u8 disable, u16 usec)
4849{
4850 int port = BP_PORT(bp);
4851 u8 ticks = usec / BNX2X_BTR;
4852
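 /* the HC timeout is programmed in units of BNX2X_BTR microseconds; a
 * usec value of 0 implicitly disables coalescing for this index even if
 * 'disable' was not set by the caller
 */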
4853 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4854
4855 disable = disable ? 1 : (usec ? 0 : 1);
4856 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4857}