/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp: driver handle
 * @from: source FP index
 * @to: destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp: driver handle
 * @buf: character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp: driver handle
 * @delta: number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

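/* Clarifying note on bnx2x_free_tx_pkt() above (added for readability, not
 * taken from the original driver comments): after the start BD is unmapped,
 * nbd is decremented once for the parse BD and, for TSO, once more for the
 * split-header BD - BDs that carry no DMA mapping of their own - so the
 * while() loop that follows only unmaps the per-fragment data BDs.
 */
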
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb: packet skb
 * @parsing_flags: parsing flags from the START CQE
 * @len_on_bd: total length of the first packet for the
 *             aggregation.
 * @pkt_len: length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

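/* Illustrative example of the gso_size math above (the numbers are only an
 * example, not taken from the driver): for an IPv4 aggregation whose first
 * frame is a full 1500-byte-MTU packet carrying TCP timestamps,
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so a
 * len_on_bd of 1514 yields gso_size = 1514 - 66 = 1448.
 */
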
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

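/* Clarifying note on the checksum helpers above (added for readability):
 * tcp_gro_complete() leaves the skb marked CHECKSUM_PARTIAL, so before it
 * is called the driver re-seeds th->check with the pseudo-header checksum
 * (~tcp_v4_check()/~tcp_v6_check() over the addresses and the aggregated
 * length), much as a GSO sender would prepare an outgoing TSO packet.
 */
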
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash,
			     tpa_info->l4_rxhash ?
			     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb_set_hash(skb, rxhash,
			     l4_rxhash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

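/* Illustrative example of the MF speed calculation above (the numbers are
 * only an example): with maxCfg = 25 on a 10000 Mbps link, SI mode reports
 * 10000 * 25 / 100 = 2500 Mbps, while SD mode treats
 * vn_max_rate = 25 * 100 = 2500 Mbps as a cap on the reported line speed.
 */
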
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp: driver handle
 * @data: link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp: driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp: driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp: driver handle
 * @nvecs: number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

c957d09f 1743static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1744{
1745 int rc = 0;
30a5de77
DK
1746 if (bp->flags & USING_MSIX_FLAG &&
1747 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1748 rc = bnx2x_req_msix_irqs(bp);
1749 if (rc)
1750 return rc;
1751 } else {
619c5cb6
VZ
1752 rc = bnx2x_req_irq(bp);
1753 if (rc) {
1754 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1755 return rc;
1756 }
1757 if (bp->flags & USING_MSI_FLAG) {
1758 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1759 netdev_info(bp->dev, "using MSI IRQ %d\n",
1760 bp->dev->irq);
1761 }
1762 if (bp->flags & USING_MSIX_FLAG) {
1763 bp->dev->irq = bp->msix_table[0].vector;
1764 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1765 bp->dev->irq);
619c5cb6
VZ
1766 }
1767 }
1768
1769 return 0;
1770}
1771
55c11941
MS
1772static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1773{
1774 int i;
1775
8f20aa57
DK
1776 for_each_rx_queue_cnic(bp, i) {
1777 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1778 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1779 }
55c11941
MS
1780}
1781
1191cb83 1782static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1783{
1784 int i;
1785
8f20aa57
DK
1786 for_each_eth_queue(bp, i) {
1787 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1788 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1789 }
9f6c9258
DK
1790}
1791
55c11941
MS
1792static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1793{
1794 int i;
1795
8f20aa57
DK
1796 local_bh_disable();
1797 for_each_rx_queue_cnic(bp, i) {
55c11941 1798 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1799 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1800 mdelay(1);
1801 }
1802 local_bh_enable();
55c11941
MS
1803}
1804
1191cb83 1805static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1806{
1807 int i;
1808
8f20aa57
DK
1809 local_bh_disable();
1810 for_each_eth_queue(bp, i) {
9f6c9258 1811 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1812 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1813 mdelay(1);
1814 }
1815 local_bh_enable();
9f6c9258
DK
1816}
1817
1818void bnx2x_netif_start(struct bnx2x *bp)
1819{
4b7ed897
DK
1820 if (netif_running(bp->dev)) {
1821 bnx2x_napi_enable(bp);
55c11941
MS
1822 if (CNIC_LOADED(bp))
1823 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1824 bnx2x_int_enable(bp);
1825 if (bp->state == BNX2X_STATE_OPEN)
1826 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1827 }
1828}
1829
1830void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1831{
1832 bnx2x_int_disable_sync(bp, disable_hw);
1833 bnx2x_napi_disable(bp);
55c11941
MS
1834 if (CNIC_LOADED(bp))
1835 bnx2x_napi_disable_cnic(bp);
9f6c9258 1836}
9f6c9258 1837
8307fa3e
VZ
1838u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1839{
8307fa3e 1840 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1841
55c11941 1842 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1843 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1844 u16 ether_type = ntohs(hdr->h_proto);
1845
1846 /* Skip VLAN tag if present */
1847 if (ether_type == ETH_P_8021Q) {
1848 struct vlan_ethhdr *vhdr =
1849 (struct vlan_ethhdr *)skb->data;
1850
1851 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1852 }
1853
1854 /* If ethertype is FCoE or FIP - use FCoE ring */
1855 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1856 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1857 }
55c11941 1858
cdb9d6ae 1859 /* select a non-FCoE queue */
ada7c19e 1860 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1861}
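/* Illustrative sketch, not part of the driver: the queue selection above in
 * plain form -- FCoE and FIP frames go to the dedicated FCoE ring, everything
 * else is spread over the ethernet queues. The helper is hypothetical.
 */
static inline int example_select_queue(unsigned short ether_type, int hash,
				       int num_eth_queues, int fcoe_queue)
{
	if (ether_type == 0x8906 /* ETH_P_FCOE */ ||
	    ether_type == 0x8914 /* ETH_P_FIP */)
		return fcoe_queue;

	return hash % num_eth_queues;
}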
1862
d6214d7a
DK
1863void bnx2x_set_num_queues(struct bnx2x *bp)
1864{
96305234 1865 /* RSS queues */
55c11941 1866 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1867
a3348722
BW
1868 /* override in STORAGE SD modes */
1869 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1870 bp->num_ethernet_queues = 1;
1871
ec6ba945 1872 /* Add special queues */
55c11941
MS
1873 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1874 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1875
1876 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1877}
1878
cdb9d6ae
VZ
1879/**
1880 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1881 *
1882 * @bp: Driver handle
1883 *
 1884 * We currently support at most 16 Tx queues for each CoS, thus we will
1885 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1886 * bp->max_cos.
1887 *
1888 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1889 * index after all ETH L2 indices.
1890 *
1891 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1892 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1893 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1894 *
1895 * The proper configuration of skb->queue_mapping is handled by
1896 * bnx2x_select_queue() and __skb_tx_hash().
1897 *
1898 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1899 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1900 */
55c11941 1901static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1902{
6383c0b3 1903 int rc, tx, rx;
ec6ba945 1904
65565884 1905 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1906 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1907
6383c0b3 1908 /* account for FCoE queue */
55c11941
MS
1909 if (include_cnic && !NO_FCOE(bp)) {
1910 rx++;
1911 tx++;
6383c0b3 1912 }
6383c0b3
AE
1913
1914 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1915 if (rc) {
1916 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1917 return rc;
1918 }
1919 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1920 if (rc) {
1921 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1922 return rc;
1923 }
1924
51c1a580 1925 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1926 tx, rx);
1927
ec6ba945
VZ
1928 return rc;
1929}
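/* Illustrative sketch, not part of the driver: the Tx/Rx counts passed to
 * netif_set_real_num_*_queues() above. With, say, 8 ethernet queues,
 * max_cos = 3 and an FCoE ring this yields tx = 25 and rx = 9. The helper is
 * hypothetical.
 */
static inline void example_real_num_queues(int num_eth, int max_cos,
					   int has_fcoe, int *tx, int *rx)
{
	*tx = num_eth * max_cos + (has_fcoe ? 1 : 0);
	*rx = num_eth + (has_fcoe ? 1 : 0);
}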
1930
1191cb83 1931static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1932{
1933 int i;
1934
1935 for_each_queue(bp, i) {
1936 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1937 u32 mtu;
a8c94b91
VZ
1938
1939 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1940 if (IS_FCOE_IDX(i))
1941 /*
 1942 * Although no IP frames are expected to arrive on
 1943 * this ring, we still want to add an
1944 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1945 * overrun attack.
1946 */
e52fcb24 1947 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1948 else
e52fcb24
ED
1949 mtu = bp->dev->mtu;
1950 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1951 IP_HEADER_ALIGNMENT_PADDING +
1952 ETH_OVREHEAD +
1953 mtu +
1954 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1955 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1956 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1957 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1958 else
1959 fp->rx_frag_size = 0;
a8c94b91
VZ
1960 }
1961}
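/* Illustrative sketch, not part of the driver: the Rx buffer sizing above in
 * compact form. The parameters stand in for the BNX2X_FW_RX_ALIGN_START/END,
 * IP_HEADER_ALIGNMENT_PADDING and ETH_OVREHEAD macros; rx_frag_size is only
 * used when the padded buffer still fits into a single page, otherwise the
 * driver falls back to regular buffer allocation (rx_frag_size = 0).
 */
static inline unsigned int example_rx_buf_size(unsigned int mtu,
					       unsigned int fw_align_start,
					       unsigned int fw_align_end,
					       unsigned int eth_overhead,
					       unsigned int ip_pad)
{
	return fw_align_start + ip_pad + eth_overhead + mtu + fw_align_end;
}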
1962
60cad4e6 1963static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1964{
1965 int i;
619c5cb6
VZ
1966 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1967
16a5fd92 1968 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
1969 * enabled
1970 */
5d317c6a
MS
1971 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1972 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1973 bp->fp->cl_id +
1974 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1975
1976 /*
1977 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1978 * per-port, so if explicit configuration is needed, do it only
1979 * for a PMF.
1980 *
1981 * For 57712 and newer on the other hand it's a per-function
1982 * configuration.
1983 */
5d317c6a 1984 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1985}
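/* Illustrative sketch, not part of the driver: the default indirection table
 * built above spreads the entries round-robin over the ethernet queues
 * (ethtool_rxfh_indir_default() is index % n_rx_rings) and offsets them by
 * the client id of the first queue. The helper is hypothetical.
 */
static inline unsigned char example_ind_table_entry(unsigned int index,
						    unsigned int num_eth_queues,
						    unsigned char base_cl_id)
{
	return base_cl_id + index % num_eth_queues;
}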
1986
60cad4e6
AE
1987int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1988 bool config_hash, bool enable)
619c5cb6 1989{
3b603066 1990 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1991
 1992 /* Although RSS is meaningless when there is a single HW queue, we
1993 * still need it enabled in order to have HW Rx hash generated.
1994 *
1995 * if (!is_eth_multi(bp))
1996 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1997 */
1998
96305234 1999 params.rss_obj = rss_obj;
619c5cb6
VZ
2000
2001 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2002
60cad4e6
AE
2003 if (enable) {
2004 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2005
2006 /* RSS configuration */
2007 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2008 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2009 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2010 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2011 if (rss_obj->udp_rss_v4)
2012 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2013 if (rss_obj->udp_rss_v6)
2014 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2015 } else {
2016 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2017 }
619c5cb6 2018
96305234
DK
2019 /* Hash bits */
2020 params.rss_result_mask = MULTI_MASK;
619c5cb6 2021
5d317c6a 2022 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2023
96305234
DK
2024 if (config_hash) {
2025 /* RSS keys */
60cad4e6 2026 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2027 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2028 }
2029
60cad4e6
AE
2030 if (IS_PF(bp))
2031 return bnx2x_config_rss(bp, &params);
2032 else
2033 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2034}
2035
1191cb83 2036static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2037{
3b603066 2038 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2039
2040 /* Prepare parameters for function state transitions */
2041 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2042
2043 func_params.f_obj = &bp->func_obj;
2044 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2045
2046 func_params.params.hw_init.load_phase = load_code;
2047
2048 return bnx2x_func_state_change(bp, &func_params);
2049}
2050
2051/*
 2052 * Cleans the objects that have internal lists without sending
16a5fd92 2053 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2054 */
7fa6f340 2055void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2056{
2057 int rc;
2058 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2059 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2060 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2061
2062 /***************** Cleanup MACs' object first *************************/
2063
2064 /* Wait for completion of requested */
2065 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2066 /* Perform a dry cleanup */
2067 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2068
2069 /* Clean ETH primary MAC */
2070 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2071 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2072 &ramrod_flags);
2073 if (rc != 0)
2074 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2075
2076 /* Cleanup UC list */
2077 vlan_mac_flags = 0;
2078 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2079 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2080 &ramrod_flags);
2081 if (rc != 0)
2082 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2083
2084 /***************** Now clean mcast object *****************************/
2085 rparam.mcast_obj = &bp->mcast_obj;
2086 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2087
8b09be5f
YM
2088 /* Add a DEL command... - Since we're doing a driver cleanup only,
2089 * we take a lock surrounding both the initial send and the CONTs,
2090 * as we don't want a true completion to disrupt us in the middle.
2091 */
2092 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2093 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2094 if (rc < 0)
51c1a580
MS
2095 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2096 rc);
619c5cb6
VZ
2097
2098 /* ...and wait until all pending commands are cleared */
2099 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2100 while (rc != 0) {
2101 if (rc < 0) {
2102 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2103 rc);
8b09be5f 2104 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2105 return;
2106 }
2107
2108 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2109 }
8b09be5f 2110 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2111}
2112
2113#ifndef BNX2X_STOP_ON_ERROR
2114#define LOAD_ERROR_EXIT(bp, label) \
2115 do { \
2116 (bp)->state = BNX2X_STATE_ERROR; \
2117 goto label; \
2118 } while (0)
55c11941
MS
2119
2120#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2121 do { \
2122 bp->cnic_loaded = false; \
2123 goto label; \
2124 } while (0)
2125#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2126#define LOAD_ERROR_EXIT(bp, label) \
2127 do { \
2128 (bp)->state = BNX2X_STATE_ERROR; \
2129 (bp)->panic = 1; \
2130 return -EBUSY; \
2131 } while (0)
55c11941
MS
2132#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2133 do { \
2134 bp->cnic_loaded = false; \
2135 (bp)->panic = 1; \
2136 return -EBUSY; \
2137 } while (0)
2138#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2139
ad5afc89
AE
2140static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2141{
2142 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2143 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2144 return;
2145}
2146
2147static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2148{
8db573ba 2149 int num_groups, vf_headroom = 0;
ad5afc89 2150 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2151
ad5afc89
AE
2152 /* number of queues for statistics is number of eth queues + FCoE */
2153 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2154
ad5afc89
AE
2155 /* Total number of FW statistics requests =
2156 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2157 * and fcoe l2 queue) stats + num of queues (which includes another 1
2158 * for fcoe l2 queue if applicable)
2159 */
2160 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2161
8db573ba
AE
2162 /* vf stats appear in the request list, but their data is allocated by
2163 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2164 * it is used to determine where to place the vf stats queries in the
2165 * request struct
2166 */
2167 if (IS_SRIOV(bp))
6411280a 2168 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2169
ad5afc89
AE
2170 /* Request is built from stats_query_header and an array of
2171 * stats_query_cmd_group each of which contains
 2172 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2173 * configured in the stats_query_header.
2174 */
2175 num_groups =
8db573ba
AE
2176 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2177 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2178 1 : 0));
2179
8db573ba
AE
2180 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2181 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2182 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2183 num_groups * sizeof(struct stats_query_cmd_group);
2184
2185 /* Data for statistics requests + stats_counter
2186 * stats_counter holds per-STORM counters that are incremented
2187 * when STORM has finished with the current request.
 2188 * memory for FCoE offloaded statistics is counted anyway,
2189 * even if they will not be sent.
2190 * VF stats are not accounted for here as the data of VF stats is stored
2191 * in memory allocated by the VF, not here.
2192 */
2193 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2194 sizeof(struct per_pf_stats) +
2195 sizeof(struct fcoe_statistics_params) +
2196 sizeof(struct per_queue_stats) * num_queue_stats +
2197 sizeof(struct stats_counter);
2198
2199 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2200 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2201
2202 /* Set shortcuts */
2203 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2204 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2205 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2206 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2207 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2208 bp->fw_stats_req_sz;
2209
6bf07b8e 2210 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2211 U64_HI(bp->fw_stats_req_mapping),
2212 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2213 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2214 U64_HI(bp->fw_stats_data_mapping),
2215 U64_LO(bp->fw_stats_data_mapping));
2216 return 0;
2217
2218alloc_mem_err:
2219 bnx2x_free_fw_stats_mem(bp);
2220 BNX2X_ERR("Can't allocate FW stats memory\n");
2221 return -ENOMEM;
2222}
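/* Illustrative sketch, not part of the driver: the group count computed above
 * is a ceiling division of the stats clients (including VF headroom) by the
 * number of query rules one command group can carry. With 18 clients and a
 * hypothetical 16 rules per group this gives two groups.
 */
static inline int example_stats_query_groups(int fw_stats_num, int vf_headroom,
					     int rules_per_group)
{
	int clients = fw_stats_num + vf_headroom;

	return clients / rules_per_group +
	       (clients % rules_per_group ? 1 : 0);
}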
2223
2224/* send load request to mcp and analyze response */
2225static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2226{
178135c1
DK
2227 u32 param;
2228
ad5afc89
AE
2229 /* init fw_seq */
2230 bp->fw_seq =
2231 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2232 DRV_MSG_SEQ_NUMBER_MASK);
2233 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2234
2235 /* Get current FW pulse sequence */
2236 bp->fw_drv_pulse_wr_seq =
2237 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2238 DRV_PULSE_SEQ_MASK);
2239 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2240
178135c1
DK
2241 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2242
2243 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2244 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2245
ad5afc89 2246 /* load request */
178135c1 2247 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2248
2249 /* if mcp fails to respond we must abort */
2250 if (!(*load_code)) {
2251 BNX2X_ERR("MCP response failure, aborting\n");
2252 return -EBUSY;
2253 }
2254
2255 /* If mcp refused (e.g. other port is in diagnostic mode) we
2256 * must abort
2257 */
2258 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2259 BNX2X_ERR("MCP refused load request, aborting\n");
2260 return -EBUSY;
2261 }
2262 return 0;
2263}
2264
2265/* check whether another PF has already loaded FW to chip. In
2266 * virtualized environments a pf from another VM may have already
2267 * initialized the device including loading FW
2268 */
2269int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2270{
2271 /* is another pf loaded on this engine? */
2272 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2273 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2274 /* build my FW version dword */
2275 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2276 (BCM_5710_FW_MINOR_VERSION << 8) +
2277 (BCM_5710_FW_REVISION_VERSION << 16) +
2278 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2279
2280 /* read loaded FW from chip */
2281 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2282
2283 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2284 loaded_fw, my_fw);
2285
2286 /* abort nic load if version mismatch */
2287 if (my_fw != loaded_fw) {
6bf07b8e 2288 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
452427b0 2289 loaded_fw, my_fw);
ad5afc89
AE
2290 return -EBUSY;
2291 }
2292 }
2293 return 0;
2294}
2295
2296/* returns the "mcp load_code" according to global load_count array */
2297static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2298{
2299 int path = BP_PATH(bp);
2300
2301 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2302 path, load_count[path][0], load_count[path][1],
2303 load_count[path][2]);
2304 load_count[path][0]++;
2305 load_count[path][1 + port]++;
2306 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2307 path, load_count[path][0], load_count[path][1],
2308 load_count[path][2]);
2309 if (load_count[path][0] == 1)
2310 return FW_MSG_CODE_DRV_LOAD_COMMON;
2311 else if (load_count[path][1 + port] == 1)
2312 return FW_MSG_CODE_DRV_LOAD_PORT;
2313 else
2314 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2315}
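/* Illustrative sketch, not part of the driver: the no-MCP load_code selection
 * above in isolation. counts[0] tracks loads on the whole path and
 * counts[1 + port] loads per port: the first loader on the path does COMMON
 * init, the first loader on a port does PORT init, everyone else only
 * FUNCTION init. The helper and its parameters are hypothetical.
 */
static inline int example_no_mcp_load_code(int *counts, int port,
					   int code_common, int code_port,
					   int code_function)
{
	counts[0]++;
	counts[1 + port]++;

	if (counts[0] == 1)
		return code_common;
	if (counts[1 + port] == 1)
		return code_port;
	return code_function;
}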
2316
2317/* mark PMF if applicable */
2318static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2319{
2320 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2321 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2322 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2323 bp->port.pmf = 1;
2324 /* We need the barrier to ensure the ordering between the
2325 * writing to bp->port.pmf here and reading it from the
2326 * bnx2x_periodic_task().
2327 */
2328 smp_mb();
2329 } else {
2330 bp->port.pmf = 0;
452427b0
YM
2331 }
2332
ad5afc89
AE
2333 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2334}
2335
2336static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2337{
2338 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2339 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2340 (bp->common.shmem2_base)) {
2341 if (SHMEM2_HAS(bp, dcc_support))
2342 SHMEM2_WR(bp, dcc_support,
2343 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2344 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2345 if (SHMEM2_HAS(bp, afex_driver_support))
2346 SHMEM2_WR(bp, afex_driver_support,
2347 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2348 }
2349
2350 /* Set AFEX default VLAN tag to an invalid value */
2351 bp->afex_def_vlan_tag = -1;
452427b0
YM
2352}
2353
1191cb83
ED
2354/**
2355 * bnx2x_bz_fp - zero content of the fastpath structure.
2356 *
2357 * @bp: driver handle
2358 * @index: fastpath index to be zeroed
2359 *
 2360 * Makes sure the contents of the bp->fp[index].napi are kept
2361 * intact.
2362 */
2363static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2364{
2365 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2366 int cos;
1191cb83 2367 struct napi_struct orig_napi = fp->napi;
15192a8c 2368 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2369
1191cb83 2370 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2371 if (fp->tpa_info)
2372 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2373 sizeof(struct bnx2x_agg_info));
2374 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2375
2376 /* Restore the NAPI object as it has been already initialized */
2377 fp->napi = orig_napi;
15192a8c 2378 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2379 fp->bp = bp;
2380 fp->index = index;
2381 if (IS_ETH_FP(fp))
2382 fp->max_cos = bp->max_cos;
2383 else
2384 /* Special queues support only one CoS */
2385 fp->max_cos = 1;
2386
65565884 2387 /* Init txdata pointers */
65565884
MS
2388 if (IS_FCOE_FP(fp))
2389 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2390 if (IS_ETH_FP(fp))
2391 for_each_cos_in_tx_queue(fp, cos)
2392 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2393 BNX2X_NUM_ETH_QUEUES(bp) + index];
2394
16a5fd92 2395 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2396 * minimal size so it must be set prior to queue memory allocation
2397 */
2398 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2399 (bp->flags & GRO_ENABLE_FLAG &&
2400 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2401 if (bp->flags & TPA_ENABLE_FLAG)
2402 fp->mode = TPA_MODE_LRO;
2403 else if (bp->flags & GRO_ENABLE_FLAG)
2404 fp->mode = TPA_MODE_GRO;
2405
1191cb83
ED
2406 /* We don't want TPA on an FCoE L2 ring */
2407 if (IS_FCOE_FP(fp))
2408 fp->disable_tpa = 1;
55c11941
MS
2409}
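/* Illustrative sketch, not part of the driver: the Tx ring indexing restored
 * above is CoS-major -- all CoS-0 rings first, then all CoS-1 rings, and so
 * on. With 4 ethernet queues, fastpath 2 / CoS 1 maps to bnx2x_txq[1 * 4 + 2],
 * i.e. slot 6. The helper is hypothetical.
 */
static inline int example_txdata_index(int cos, int num_eth_queues,
				       int fp_index)
{
	return cos * num_eth_queues + fp_index;
}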
2410
2411int bnx2x_load_cnic(struct bnx2x *bp)
2412{
2413 int i, rc, port = BP_PORT(bp);
2414
2415 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2416
2417 mutex_init(&bp->cnic_mutex);
2418
ad5afc89
AE
2419 if (IS_PF(bp)) {
2420 rc = bnx2x_alloc_mem_cnic(bp);
2421 if (rc) {
2422 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2423 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2424 }
55c11941
MS
2425 }
2426
2427 rc = bnx2x_alloc_fp_mem_cnic(bp);
2428 if (rc) {
2429 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2430 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2431 }
2432
2433 /* Update the number of queues with the cnic queues */
2434 rc = bnx2x_set_real_num_queues(bp, 1);
2435 if (rc) {
2436 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2437 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2438 }
2439
2440 /* Add all CNIC NAPI objects */
2441 bnx2x_add_all_napi_cnic(bp);
2442 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2443 bnx2x_napi_enable_cnic(bp);
2444
2445 rc = bnx2x_init_hw_func_cnic(bp);
2446 if (rc)
2447 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2448
2449 bnx2x_nic_init_cnic(bp);
2450
ad5afc89
AE
2451 if (IS_PF(bp)) {
2452 /* Enable Timer scan */
2453 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2454
2455 /* setup cnic queues */
2456 for_each_cnic_queue(bp, i) {
2457 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2458 if (rc) {
2459 BNX2X_ERR("Queue setup failed\n");
2460 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2461 }
55c11941
MS
2462 }
2463 }
2464
2465 /* Initialize Rx filter. */
8b09be5f 2466 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2467
2468 /* re-read iscsi info */
2469 bnx2x_get_iscsi_info(bp);
2470 bnx2x_setup_cnic_irq_info(bp);
2471 bnx2x_setup_cnic_info(bp);
2472 bp->cnic_loaded = true;
2473 if (bp->state == BNX2X_STATE_OPEN)
2474 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2475
55c11941
MS
2476 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2477
2478 return 0;
2479
2480#ifndef BNX2X_STOP_ON_ERROR
2481load_error_cnic2:
2482 /* Disable Timer scan */
2483 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2484
2485load_error_cnic1:
2486 bnx2x_napi_disable_cnic(bp);
2487 /* Update the number of queues without the cnic queues */
d9d81862 2488 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2489 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2490load_error_cnic0:
2491 BNX2X_ERR("CNIC-related load failed\n");
2492 bnx2x_free_fp_mem_cnic(bp);
2493 bnx2x_free_mem_cnic(bp);
2494 return rc;
2495#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2496}
2497
9f6c9258
DK
2498/* must be called with rtnl_lock */
2499int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2500{
619c5cb6 2501 int port = BP_PORT(bp);
ad5afc89 2502 int i, rc = 0, load_code = 0;
9f6c9258 2503
55c11941
MS
2504 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2505 DP(NETIF_MSG_IFUP,
2506 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2507
9f6c9258 2508#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2509 if (unlikely(bp->panic)) {
2510 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2511 return -EPERM;
51c1a580 2512 }
9f6c9258
DK
2513#endif
2514
2515 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2516
16a5fd92 2517 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2518 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2519 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2520 &bp->last_reported_link.link_report_flags);
2ae17f66 2521
ad5afc89
AE
2522 if (IS_PF(bp))
2523 /* must be called before memory allocation and HW init */
2524 bnx2x_ilt_set_info(bp);
523224a3 2525
6383c0b3
AE
2526 /*
2527 * Zero fastpath structures preserving invariants like napi, which are
2528 * allocated only once, fp index, max_cos, bp pointer.
65565884 2529 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2530 */
51c1a580 2531 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2532 for_each_queue(bp, i)
2533 bnx2x_bz_fp(bp, i);
55c11941
MS
2534 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2535 bp->num_cnic_queues) *
2536 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2537
55c11941 2538 bp->fcoe_init = false;
6383c0b3 2539
a8c94b91
VZ
2540 /* Set the receive queues buffer size */
2541 bnx2x_set_rx_buf_size(bp);
2542
ad5afc89
AE
2543 if (IS_PF(bp)) {
2544 rc = bnx2x_alloc_mem(bp);
2545 if (rc) {
2546 BNX2X_ERR("Unable to allocate bp memory\n");
2547 return rc;
2548 }
2549 }
2550
ad5afc89
AE
2551 /* need to be done after alloc mem, since it's self adjusting to amount
2552 * of memory available for RSS queues
2553 */
2554 rc = bnx2x_alloc_fp_mem(bp);
2555 if (rc) {
2556 BNX2X_ERR("Unable to allocate memory for fps\n");
2557 LOAD_ERROR_EXIT(bp, load_error0);
2558 }
d6214d7a 2559
e3ed4eae
DK
 2560 /* Allocate memory for FW statistics */
2561 if (bnx2x_alloc_fw_stats_mem(bp))
2562 LOAD_ERROR_EXIT(bp, load_error0);
2563
8d9ac297
AE
2564 /* request pf to initialize status blocks */
2565 if (IS_VF(bp)) {
2566 rc = bnx2x_vfpf_init(bp);
2567 if (rc)
2568 LOAD_ERROR_EXIT(bp, load_error0);
2569 }
2570
b3b83c3f
DK
2571 /* As long as bnx2x_alloc_mem() may possibly update
2572 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2573 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2574 */
55c11941 2575 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2576 if (rc) {
ec6ba945 2577 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2578 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2579 }
2580
6383c0b3 2581 /* configure multi cos mappings in kernel.
16a5fd92
YM
2582 * this configuration may be overridden by a multi class queue
2583 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2584 */
2585 bnx2x_setup_tc(bp->dev, bp->max_cos);
2586
26614ba5
MS
2587 /* Add all NAPI objects */
2588 bnx2x_add_all_napi(bp);
55c11941 2589 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2590 bnx2x_napi_enable(bp);
2591
ad5afc89
AE
2592 if (IS_PF(bp)) {
2593 /* set pf load just before approaching the MCP */
2594 bnx2x_set_pf_load(bp);
2595
2596 /* if mcp exists send load request and analyze response */
2597 if (!BP_NOMCP(bp)) {
2598 /* attempt to load pf */
2599 rc = bnx2x_nic_load_request(bp, &load_code);
2600 if (rc)
2601 LOAD_ERROR_EXIT(bp, load_error1);
2602
2603 /* what did mcp say? */
2604 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2605 if (rc) {
2606 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2607 LOAD_ERROR_EXIT(bp, load_error2);
2608 }
ad5afc89
AE
2609 } else {
2610 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2611 }
9f6c9258 2612
ad5afc89
AE
2613 /* mark pmf if applicable */
2614 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2615
ad5afc89
AE
2616 /* Init Function state controlling object */
2617 bnx2x__init_func_obj(bp);
6383c0b3 2618
ad5afc89
AE
2619 /* Initialize HW */
2620 rc = bnx2x_init_hw(bp, load_code);
2621 if (rc) {
2622 BNX2X_ERR("HW init failed, aborting\n");
2623 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2624 LOAD_ERROR_EXIT(bp, load_error2);
2625 }
9f6c9258
DK
2626 }
2627
ecf01c22
YM
2628 bnx2x_pre_irq_nic_init(bp);
2629
d6214d7a
DK
2630 /* Connect to IRQs */
2631 rc = bnx2x_setup_irqs(bp);
523224a3 2632 if (rc) {
ad5afc89
AE
2633 BNX2X_ERR("setup irqs failed\n");
2634 if (IS_PF(bp))
2635 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2636 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2637 }
2638
619c5cb6 2639 /* Init per-function objects */
ad5afc89 2640 if (IS_PF(bp)) {
ecf01c22
YM
2641 /* Setup NIC internals and enable interrupts */
2642 bnx2x_post_irq_nic_init(bp, load_code);
2643
ad5afc89 2644 bnx2x_init_bp_objs(bp);
b56e9670 2645 bnx2x_iov_nic_init(bp);
a3348722 2646
ad5afc89
AE
2647 /* Set AFEX default VLAN tag to an invalid value */
2648 bp->afex_def_vlan_tag = -1;
2649 bnx2x_nic_load_afex_dcc(bp, load_code);
2650 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2651 rc = bnx2x_func_start(bp);
2652 if (rc) {
2653 BNX2X_ERR("Function start failed!\n");
2654 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2655
619c5cb6 2656 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2657 }
9f6c9258 2658
ad5afc89
AE
2659 /* Send LOAD_DONE command to MCP */
2660 if (!BP_NOMCP(bp)) {
2661 load_code = bnx2x_fw_command(bp,
2662 DRV_MSG_CODE_LOAD_DONE, 0);
2663 if (!load_code) {
2664 BNX2X_ERR("MCP response failure, aborting\n");
2665 rc = -EBUSY;
2666 LOAD_ERROR_EXIT(bp, load_error3);
2667 }
2668 }
9f6c9258 2669
0c14e5ce
AE
2670 /* initialize FW coalescing state machines in RAM */
2671 bnx2x_update_coalesce(bp);
60cad4e6 2672 }
0c14e5ce 2673
60cad4e6
AE
2674 /* setup the leading queue */
2675 rc = bnx2x_setup_leading(bp);
2676 if (rc) {
2677 BNX2X_ERR("Setup leading failed!\n");
2678 LOAD_ERROR_EXIT(bp, load_error3);
2679 }
ad5afc89 2680
60cad4e6
AE
2681 /* set up the rest of the queues */
2682 for_each_nondefault_eth_queue(bp, i) {
2683 if (IS_PF(bp))
2684 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2685 else /* VF */
2686 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2687 if (rc) {
60cad4e6 2688 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2689 LOAD_ERROR_EXIT(bp, load_error3);
2690 }
60cad4e6 2691 }
8d9ac297 2692
60cad4e6
AE
2693 /* setup rss */
2694 rc = bnx2x_init_rss(bp);
2695 if (rc) {
2696 BNX2X_ERR("PF RSS init failed\n");
2697 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2698 }
619c5cb6 2699
523224a3
DK
2700 /* Now when Clients are configured we are ready to work */
2701 bp->state = BNX2X_STATE_OPEN;
2702
619c5cb6 2703 /* Configure a ucast MAC */
ad5afc89
AE
2704 if (IS_PF(bp))
2705 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2706 else /* vf */
f8f4f61a
DK
2707 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2708 true);
51c1a580
MS
2709 if (rc) {
2710 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2711 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2712 }
6e30dd4e 2713
ad5afc89 2714 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2715 bnx2x_update_max_mf_config(bp, bp->pending_max);
2716 bp->pending_max = 0;
2717 }
2718
ad5afc89
AE
2719 if (bp->port.pmf) {
2720 rc = bnx2x_initial_phy_init(bp, load_mode);
2721 if (rc)
2722 LOAD_ERROR_EXIT(bp, load_error3);
2723 }
c63da990 2724 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2725
619c5cb6
VZ
2726 /* Start fast path */
2727
2728 /* Initialize Rx filter. */
8b09be5f 2729 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2730
619c5cb6 2731 /* Start the Tx */
9f6c9258
DK
2732 switch (load_mode) {
2733 case LOAD_NORMAL:
16a5fd92 2734 /* Tx queue should be only re-enabled */
523224a3 2735 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2736 break;
2737
2738 case LOAD_OPEN:
2739 netif_tx_start_all_queues(bp->dev);
523224a3 2740 smp_mb__after_clear_bit();
9f6c9258
DK
2741 break;
2742
2743 case LOAD_DIAG:
8970b2e4 2744 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2745 bp->state = BNX2X_STATE_DIAG;
2746 break;
2747
2748 default:
2749 break;
2750 }
2751
00253a8c 2752 if (bp->port.pmf)
4c704899 2753 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2754 else
9f6c9258
DK
2755 bnx2x__link_status_update(bp);
2756
2757 /* start the timer */
2758 mod_timer(&bp->timer, jiffies + bp->current_interval);
2759
55c11941
MS
2760 if (CNIC_ENABLED(bp))
2761 bnx2x_load_cnic(bp);
9f6c9258 2762
ad5afc89
AE
2763 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2764 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2765 u32 val;
2766 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2767 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2768 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2769 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2770 }
2771
619c5cb6 2772 /* Wait for all pending SP commands to complete */
ad5afc89 2773 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2774 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2775 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2776 return -EBUSY;
2777 }
6891dd25 2778
9876879f
BW
2779 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2780 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2781 bnx2x_dcbx_init(bp, false);
2782
55c11941
MS
2783 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2784
9f6c9258
DK
2785 return 0;
2786
619c5cb6 2787#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2788load_error3:
ad5afc89
AE
2789 if (IS_PF(bp)) {
2790 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2791
ad5afc89
AE
2792 /* Clean queueable objects */
2793 bnx2x_squeeze_objects(bp);
2794 }
619c5cb6 2795
9f6c9258
DK
2796 /* Free SKBs, SGEs, TPA pool and driver internals */
2797 bnx2x_free_skbs(bp);
ec6ba945 2798 for_each_rx_queue(bp, i)
9f6c9258 2799 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2800
9f6c9258 2801 /* Release IRQs */
d6214d7a
DK
2802 bnx2x_free_irq(bp);
2803load_error2:
ad5afc89 2804 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2805 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2806 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2807 }
2808
2809 bp->port.pmf = 0;
9f6c9258
DK
2810load_error1:
2811 bnx2x_napi_disable(bp);
722c6f58 2812 bnx2x_del_all_napi(bp);
ad5afc89 2813
889b9af3 2814 /* clear pf_load status, as it was already set */
ad5afc89
AE
2815 if (IS_PF(bp))
2816 bnx2x_clear_pf_load(bp);
d6214d7a 2817load_error0:
ad5afc89 2818 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2819 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2820 bnx2x_free_mem(bp);
2821
2822 return rc;
619c5cb6 2823#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2824}
2825
7fa6f340 2826int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2827{
2828 u8 rc = 0, cos, i;
2829
2830 /* Wait until tx fastpath tasks complete */
2831 for_each_tx_queue(bp, i) {
2832 struct bnx2x_fastpath *fp = &bp->fp[i];
2833
2834 for_each_cos_in_tx_queue(fp, cos)
2835 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2836 if (rc)
2837 return rc;
2838 }
2839 return 0;
2840}
2841
9f6c9258 2842/* must be called with rtnl_lock */
5d07d868 2843int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2844{
2845 int i;
c9ee9206
VZ
2846 bool global = false;
2847
55c11941
MS
2848 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2849
9ce392d4 2850 /* mark driver is unloaded in shmem2 */
ad5afc89 2851 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2852 u32 val;
2853 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2854 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2855 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2856 }
2857
80bfe5cc 2858 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2859 (bp->state == BNX2X_STATE_CLOSED ||
2860 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2861 /* We can get here if the driver has been unloaded
2862 * during parity error recovery and is either waiting for a
2863 * leader to complete or for other functions to unload and
2864 * then ifdown has been issued. In this case we want to
2865 * unload and let other functions to complete a recovery
2866 * process.
2867 */
9f6c9258
DK
2868 bp->recovery_state = BNX2X_RECOVERY_DONE;
2869 bp->is_leader = 0;
c9ee9206
VZ
2870 bnx2x_release_leader_lock(bp);
2871 smp_mb();
2872
51c1a580
MS
2873 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2874 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2875 return -EINVAL;
2876 }
2877
80bfe5cc 2878 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2879 * has not completed successfully - all resources are released.
80bfe5cc
YM
2880 *
 2881 * we can get here only after an unsuccessful ndo_* callback, during which
2882 * dev->IFF_UP flag is still on.
2883 */
2884 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2885 return 0;
2886
2887 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2888 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2889 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2890 */
2891 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2892 smp_mb();
2893
78c3bcc5
AE
2894 /* indicate to VFs that the PF is going down */
2895 bnx2x_iov_channel_down(bp);
2896
55c11941
MS
2897 if (CNIC_LOADED(bp))
2898 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2899
9505ee37
VZ
2900 /* Stop Tx */
2901 bnx2x_tx_disable(bp);
65565884 2902 netdev_reset_tc(bp->dev);
9505ee37 2903
9f6c9258 2904 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2905
9f6c9258 2906 del_timer_sync(&bp->timer);
f85582f8 2907
ad5afc89
AE
2908 if (IS_PF(bp)) {
2909 /* Set ALWAYS_ALIVE bit in shmem */
2910 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2911 bnx2x_drv_pulse(bp);
2912 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2913 bnx2x_save_statistics(bp);
2914 }
9f6c9258 2915
ad5afc89
AE
2916 /* wait till consumers catch up with producers in all queues */
2917 bnx2x_drain_tx_queues(bp);
9f6c9258 2918
9b176b6b
AE
2919 /* if VF indicate to PF this function is going down (PF will delete sp
2920 * elements and clear initializations
2921 */
2922 if (IS_VF(bp))
2923 bnx2x_vfpf_close_vf(bp);
2924 else if (unload_mode != UNLOAD_RECOVERY)
2925 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2926 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2927 else {
c9ee9206
VZ
2928 /* Send the UNLOAD_REQUEST to the MCP */
2929 bnx2x_send_unload_req(bp, unload_mode);
2930
16a5fd92 2931 /* Prevent transactions to host from the functions on the
c9ee9206 2932 * engine that doesn't reset global blocks in case of global
16a5fd92 2933 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 2934 * (the engine whose leader will perform the recovery
2935 * last).
2936 */
2937 if (!CHIP_IS_E1x(bp))
2938 bnx2x_pf_disable(bp);
2939
2940 /* Disable HW interrupts, NAPI */
523224a3 2941 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2942 /* Delete all NAPI objects */
2943 bnx2x_del_all_napi(bp);
55c11941
MS
2944 if (CNIC_LOADED(bp))
2945 bnx2x_del_all_napi_cnic(bp);
523224a3 2946 /* Release IRQs */
d6214d7a 2947 bnx2x_free_irq(bp);
c9ee9206
VZ
2948
2949 /* Report UNLOAD_DONE to MCP */
5d07d868 2950 bnx2x_send_unload_done(bp, false);
523224a3 2951 }
9f6c9258 2952
619c5cb6 2953 /*
16a5fd92 2954 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2955 * the queueable objects here in case they failed to get cleaned so far.
2956 */
ad5afc89
AE
2957 if (IS_PF(bp))
2958 bnx2x_squeeze_objects(bp);
619c5cb6 2959
79616895
VZ
2960 /* There should be no more pending SP commands at this stage */
2961 bp->sp_state = 0;
2962
9f6c9258
DK
2963 bp->port.pmf = 0;
2964
a0d307b2
DK
2965 /* clear pending work in rtnl task */
2966 bp->sp_rtnl_state = 0;
2967 smp_mb();
2968
9f6c9258
DK
2969 /* Free SKBs, SGEs, TPA pool and driver internals */
2970 bnx2x_free_skbs(bp);
55c11941
MS
2971 if (CNIC_LOADED(bp))
2972 bnx2x_free_skbs_cnic(bp);
ec6ba945 2973 for_each_rx_queue(bp, i)
9f6c9258 2974 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2975
ad5afc89
AE
2976 bnx2x_free_fp_mem(bp);
2977 if (CNIC_LOADED(bp))
55c11941 2978 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2979
ad5afc89 2980 if (IS_PF(bp)) {
ad5afc89
AE
2981 if (CNIC_LOADED(bp))
2982 bnx2x_free_mem_cnic(bp);
2983 }
b4cddbd6
AE
2984 bnx2x_free_mem(bp);
2985
9f6c9258 2986 bp->state = BNX2X_STATE_CLOSED;
55c11941 2987 bp->cnic_loaded = false;
9f6c9258 2988
c9ee9206
VZ
2989 /* Check if there are pending parity attentions. If there are - set
2990 * RECOVERY_IN_PROGRESS.
2991 */
ad5afc89 2992 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2993 bnx2x_set_reset_in_progress(bp);
2994
2995 /* Set RESET_IS_GLOBAL if needed */
2996 if (global)
2997 bnx2x_set_reset_global(bp);
2998 }
2999
9f6c9258
DK
3000 /* The last driver must disable a "close the gate" if there is no
3001 * parity attention or "process kill" pending.
3002 */
ad5afc89
AE
3003 if (IS_PF(bp) &&
3004 !bnx2x_clear_pf_load(bp) &&
3005 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3006 bnx2x_disable_close_the_gate(bp);
3007
55c11941
MS
3008 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3009
9f6c9258
DK
3010 return 0;
3011}
f85582f8 3012
9f6c9258
DK
3013int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3014{
3015 u16 pmcsr;
3016
adf5f6a1 3017 /* If there is no power capability, silently succeed */
29ed74c3 3018 if (!bp->pdev->pm_cap) {
51c1a580 3019 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3020 return 0;
3021 }
3022
29ed74c3 3023 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3024
3025 switch (state) {
3026 case PCI_D0:
29ed74c3 3027 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3028 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3029 PCI_PM_CTRL_PME_STATUS));
3030
3031 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3032 /* delay required during transition out of D3hot */
3033 msleep(20);
3034 break;
3035
3036 case PCI_D3hot:
3037 /* If there are other clients above don't
3038 shut down the power */
3039 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3040 return 0;
3041 /* Don't shut down the power for emulation and FPGA */
3042 if (CHIP_REV_IS_SLOW(bp))
3043 return 0;
3044
3045 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3046 pmcsr |= 3;
3047
3048 if (bp->wol)
3049 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3050
29ed74c3 3051 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3052 pmcsr);
3053
3054 /* No more memory access after this point until
3055 * device is brought back to D0.
3056 */
3057 break;
3058
3059 default:
51c1a580 3060 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3061 return -EINVAL;
3062 }
3063 return 0;
3064}
3065
9f6c9258
DK
3066/*
3067 * net_device service functions
3068 */
d6214d7a 3069int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3070{
3071 int work_done = 0;
6383c0b3 3072 u8 cos;
9f6c9258
DK
3073 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3074 napi);
3075 struct bnx2x *bp = fp->bp;
3076
3077 while (1) {
3078#ifdef BNX2X_STOP_ON_ERROR
3079 if (unlikely(bp->panic)) {
3080 napi_complete(napi);
3081 return 0;
3082 }
3083#endif
8f20aa57
DK
3084 if (!bnx2x_fp_lock_napi(fp))
3085 return work_done;
9f6c9258 3086
6383c0b3 3087 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3088 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3089 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3090
9f6c9258
DK
3091 if (bnx2x_has_rx_work(fp)) {
3092 work_done += bnx2x_rx_int(fp, budget - work_done);
3093
3094 /* must not complete if we consumed full budget */
8f20aa57
DK
3095 if (work_done >= budget) {
3096 bnx2x_fp_unlock_napi(fp);
9f6c9258 3097 break;
8f20aa57 3098 }
9f6c9258
DK
3099 }
3100
3101 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3102 if (!bnx2x_fp_unlock_napi(fp) &&
3103 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3104
ec6ba945
VZ
3105 /* No need to update SB for FCoE L2 ring as long as
3106 * it's connected to the default SB and the SB
3107 * has been updated when NAPI was scheduled.
3108 */
3109 if (IS_FCOE_FP(fp)) {
3110 napi_complete(napi);
3111 break;
3112 }
9f6c9258 3113 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3114 /* bnx2x_has_rx_work() reads the status block,
3115 * thus we need to ensure that status block indices
3116 * have been actually read (bnx2x_update_fpsb_idx)
3117 * prior to this check (bnx2x_has_rx_work) so that
3118 * we won't write the "newer" value of the status block
3119 * to IGU (if there was a DMA right after
3120 * bnx2x_has_rx_work and if there is no rmb, the memory
3121 * reading (bnx2x_update_fpsb_idx) may be postponed
3122 * to right before bnx2x_ack_sb). In this case there
3123 * will never be another interrupt until there is
3124 * another update of the status block, while there
3125 * is still unhandled work.
3126 */
9f6c9258
DK
3127 rmb();
3128
3129 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3130 napi_complete(napi);
3131 /* Re-enable interrupts */
51c1a580 3132 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3133 "Update index to %d\n", fp->fp_hc_idx);
3134 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3135 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3136 IGU_INT_ENABLE, 1);
3137 break;
3138 }
3139 }
3140 }
3141
3142 return work_done;
3143}
3144
e0d1095a 3145#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3146/* must be called with local_bh_disable()d */
3147int bnx2x_low_latency_recv(struct napi_struct *napi)
3148{
3149 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3150 napi);
3151 struct bnx2x *bp = fp->bp;
3152 int found = 0;
3153
3154 if ((bp->state == BNX2X_STATE_CLOSED) ||
3155 (bp->state == BNX2X_STATE_ERROR) ||
3156 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3157 return LL_FLUSH_FAILED;
3158
3159 if (!bnx2x_fp_lock_poll(fp))
3160 return LL_FLUSH_BUSY;
3161
75b29459 3162 if (bnx2x_has_rx_work(fp))
8f20aa57 3163 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3164
3165 bnx2x_fp_unlock_poll(fp);
3166
3167 return found;
3168}
3169#endif
3170
9f6c9258
DK
3171/* we split the first BD into headers and data BDs
 3172 * to ease the pain of our fellow microcode engineers;
3173 * we use one mapping for both BDs
9f6c9258 3174 */
91226790
DK
3175static u16 bnx2x_tx_split(struct bnx2x *bp,
3176 struct bnx2x_fp_txdata *txdata,
3177 struct sw_tx_bd *tx_buf,
3178 struct eth_tx_start_bd **tx_bd, u16 hlen,
3179 u16 bd_prod)
9f6c9258
DK
3180{
3181 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3182 struct eth_tx_bd *d_tx_bd;
3183 dma_addr_t mapping;
3184 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3185
3186 /* first fix first BD */
9f6c9258
DK
3187 h_tx_bd->nbytes = cpu_to_le16(hlen);
3188
91226790
DK
3189 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3190 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3191
3192 /* now get a new data BD
3193 * (after the pbd) and fill it */
3194 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3195 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3196
3197 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3198 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3199
3200 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3201 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3202 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3203
3204 /* this marks the BD as one that has no individual mapping */
3205 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3206
3207 DP(NETIF_MSG_TX_QUEUED,
3208 "TSO split data size is %d (%x:%x)\n",
3209 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3210
3211 /* update tx_bd */
3212 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3213
3214 return bd_prod;
3215}
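/* Illustrative sketch, not part of the driver: what the split above does to
 * the two BDs, using plain integers instead of DMA addresses. Both
 * descriptors keep pointing into the single original mapping; only the
 * lengths and the data BD offset change. The types are hypothetical.
 */
struct example_bd {
	unsigned long long addr;
	unsigned int len;
};

static inline void example_tso_split(struct example_bd *hdr_bd,
				     struct example_bd *data_bd,
				     unsigned int hlen)
{
	unsigned int old_len = hdr_bd->len;

	hdr_bd->len = hlen;			/* headers only */
	data_bd->addr = hdr_bd->addr + hlen;	/* same mapping, offset */
	data_bd->len = old_len - hlen;		/* remaining payload */
}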
3216
86564c3f
YM
3217#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3218#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3219static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3220{
86564c3f
YM
3221 __sum16 tsum = (__force __sum16) csum;
3222
9f6c9258 3223 if (fix > 0)
86564c3f
YM
3224 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3225 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3226
3227 else if (fix < 0)
86564c3f
YM
3228 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3229 csum_partial(t_header, -fix, 0)));
9f6c9258 3230
e2593fcd 3231 return bswab16(tsum);
9f6c9258
DK
3232}
3233
91226790 3234static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3235{
3236 u32 rc;
a848ade4
DK
3237 __u8 prot = 0;
3238 __be16 protocol;
9f6c9258
DK
3239
3240 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3241 return XMIT_PLAIN;
9f6c9258 3242
a848ade4
DK
3243 protocol = vlan_get_protocol(skb);
3244 if (protocol == htons(ETH_P_IPV6)) {
3245 rc = XMIT_CSUM_V6;
3246 prot = ipv6_hdr(skb)->nexthdr;
3247 } else {
3248 rc = XMIT_CSUM_V4;
3249 prot = ip_hdr(skb)->protocol;
3250 }
9f6c9258 3251
a848ade4
DK
3252 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3253 if (inner_ip_hdr(skb)->version == 6) {
3254 rc |= XMIT_CSUM_ENC_V6;
3255 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3256 rc |= XMIT_CSUM_TCP;
9f6c9258 3257 } else {
a848ade4
DK
3258 rc |= XMIT_CSUM_ENC_V4;
3259 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3260 rc |= XMIT_CSUM_TCP;
3261 }
3262 }
a848ade4
DK
3263 if (prot == IPPROTO_TCP)
3264 rc |= XMIT_CSUM_TCP;
9f6c9258 3265
36a8f39e
ED
3266 if (skb_is_gso(skb)) {
3267 if (skb_is_gso_v6(skb)) {
3268 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3269 if (rc & XMIT_CSUM_ENC)
3270 rc |= XMIT_GSO_ENC_V6;
3271 } else {
3272 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3273 if (rc & XMIT_CSUM_ENC)
3274 rc |= XMIT_GSO_ENC_V4;
3275 }
a848ade4 3276 }
9f6c9258
DK
3277
3278 return rc;
3279}
3280
3281#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 3282/* check if the packet requires linearization (packet is too fragmented);
 3283 no need to check fragmentation if the page size > 8K (there will be no
 3284 violation of FW restrictions) */
3285static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3286 u32 xmit_type)
3287{
3288 int to_copy = 0;
3289 int hlen = 0;
3290 int first_bd_sz = 0;
3291
3292 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3293 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3294
3295 if (xmit_type & XMIT_GSO) {
3296 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3297 /* Check if LSO packet needs to be copied:
3298 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3299 int wnd_size = MAX_FETCH_BD - 3;
3300 /* Number of windows to check */
3301 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3302 int wnd_idx = 0;
3303 int frag_idx = 0;
3304 u32 wnd_sum = 0;
3305
3306 /* Headers length */
3307 hlen = (int)(skb_transport_header(skb) - skb->data) +
3308 tcp_hdrlen(skb);
3309
3310 /* Amount of data (w/o headers) on linear part of SKB*/
3311 first_bd_sz = skb_headlen(skb) - hlen;
3312
3313 wnd_sum = first_bd_sz;
3314
3315 /* Calculate the first sum - it's special */
3316 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3317 wnd_sum +=
9e903e08 3318 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3319
3320 /* If there was data on linear skb data - check it */
3321 if (first_bd_sz > 0) {
3322 if (unlikely(wnd_sum < lso_mss)) {
3323 to_copy = 1;
3324 goto exit_lbl;
3325 }
3326
3327 wnd_sum -= first_bd_sz;
3328 }
3329
3330 /* Others are easier: run through the frag list and
3331 check all windows */
3332 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3333 wnd_sum +=
9e903e08 3334 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3335
3336 if (unlikely(wnd_sum < lso_mss)) {
3337 to_copy = 1;
3338 break;
3339 }
3340 wnd_sum -=
9e903e08 3341 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3342 }
3343 } else {
 3344 /* a non-LSO packet that is too fragmented should always
 3345 be linearized */
3346 to_copy = 1;
3347 }
3348 }
3349
3350exit_lbl:
3351 if (unlikely(to_copy))
3352 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3353 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3354 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3355 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3356
3357 return to_copy;
3358}
3359#endif
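/* Illustrative sketch, not part of the driver: the LSO window check above
 * reduced to its core. Every run of wnd_size consecutive buffer sizes
 * (linear part without headers, then the frags) must add up to at least one
 * MSS, otherwise the skb is too fragmented for the FW and has to be
 * linearized. The helper is hypothetical and ignores the driver's
 * special-casing of an empty linear part.
 */
static inline int example_lso_needs_copy(const unsigned int *sizes, int n,
					 int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		wnd_sum += sizes[i];
		if (i >= wnd_size)
			wnd_sum -= sizes[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < mss)
			return 1;	/* too fragmented -> linearize */
	}
	return 0;
}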
3360
91226790
DK
3361static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3362 u32 xmit_type)
f2e0899f 3363{
a848ade4
DK
3364 struct ipv6hdr *ipv6;
3365
2297a2da
VZ
3366 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3367 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3368 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3369
3370 if (xmit_type & XMIT_GSO_ENC_V6)
3371 ipv6 = inner_ipv6_hdr(skb);
3372 else if (xmit_type & XMIT_GSO_V6)
3373 ipv6 = ipv6_hdr(skb);
3374 else
3375 ipv6 = NULL;
3376
3377 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3378 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3379}
3380
3381/**
e8920674 3382 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3383 *
e8920674
DK
3384 * @skb: packet skb
3385 * @pbd: parse BD
3386 * @xmit_type: xmit flags
f2e0899f 3387 */
91226790
DK
3388static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3389 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3390 struct eth_tx_start_bd *tx_start_bd,
91226790 3391 u32 xmit_type)
f2e0899f
DK
3392{
3393 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3394 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3395 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3396
3397 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3398 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3399 pbd->tcp_pseudo_csum =
86564c3f
YM
3400 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3401 ip_hdr(skb)->daddr,
3402 0, IPPROTO_TCP, 0));
f2e0899f 3403
057cf65e
YM
3404 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3405 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3406 } else {
f2e0899f 3407 pbd->tcp_pseudo_csum =
86564c3f
YM
3408 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3409 &ipv6_hdr(skb)->daddr,
3410 0, IPPROTO_TCP, 0));
057cf65e 3411 }
f2e0899f 3412
86564c3f
YM
3413 pbd->global_data |=
3414 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3415}
f85582f8 3416
a848ade4
DK
3417/**
3418 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3419 *
3420 * @bp: driver handle
3421 * @skb: packet skb
3422 * @parsing_data: data to be updated
3423 * @xmit_type: xmit flags
3424 *
3425 * 57712/578xx related, when skb has encapsulation
3426 */
3427static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3428 u32 *parsing_data, u32 xmit_type)
3429{
3430 *parsing_data |=
3431 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3432 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3433 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3434
3435 if (xmit_type & XMIT_CSUM_TCP) {
3436 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3437 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3438 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3439
3440 return skb_inner_transport_header(skb) +
3441 inner_tcp_hdrlen(skb) - skb->data;
3442 }
3443
3444 /* We support checksum offload for TCP and UDP only.
3445 * No need to pass the UDP header length - it's a constant.
3446 */
3447 return skb_inner_transport_header(skb) +
3448 sizeof(struct udphdr) - skb->data;
3449}
3450
f2e0899f 3451/**
e8920674 3452 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3453 *
e8920674
DK
3454 * @bp: driver handle
3455 * @skb: packet skb
3456 * @parsing_data: data to be updated
3457 * @xmit_type: xmit flags
f2e0899f 3458 *
91226790 3459 * 57712/578xx related
f2e0899f 3460 */
91226790
DK
3461static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3462 u32 *parsing_data, u32 xmit_type)
f2e0899f 3463{
e39aece7 3464 *parsing_data |=
2de67439 3465 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3466 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3467 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3468
e39aece7
VZ
3469 if (xmit_type & XMIT_CSUM_TCP) {
3470 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3471 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3472 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3473
e39aece7 3474 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3475 }
3476 /* We support checksum offload for TCP and UDP only.
3477 * No need to pass the UDP header length - it's a constant.
3478 */
3479 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3480}
3481
a848ade4 3482/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3483static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3484 struct eth_tx_start_bd *tx_start_bd,
3485 u32 xmit_type)
93ef5c02 3486{
93ef5c02
DK
3487 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3488
a848ade4 3489 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3490 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3491
3492 if (!(xmit_type & XMIT_CSUM_TCP))
3493 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3494}
3495
f2e0899f 3496/**
e8920674 3497 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3498 *
e8920674
DK
3499 * @bp: driver handle
3500 * @skb: packet skb
3501 * @pbd: parse BD to be updated
3502 * @xmit_type: xmit flags
f2e0899f 3503 */
91226790
DK
3504static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3505 struct eth_tx_parse_bd_e1x *pbd,
3506 u32 xmit_type)
f2e0899f 3507{
e39aece7 3508 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3509
3510 /* for now NS flag is not used in Linux */
3511 pbd->global_data =
86564c3f
YM
3512 cpu_to_le16(hlen |
3513 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3514 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3515
3516 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3517 skb_network_header(skb)) >> 1;
f2e0899f 3518
e39aece7
VZ
3519 hlen += pbd->ip_hlen_w;
3520
3521 /* We support checksum offload for TCP and UDP only */
3522 if (xmit_type & XMIT_CSUM_TCP)
3523 hlen += tcp_hdrlen(skb) / 2;
3524 else
3525 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3526
3527 pbd->total_hlen_w = cpu_to_le16(hlen);
3528 hlen = hlen*2;
3529
3530 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3531 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3532
3533 } else {
3534 s8 fix = SKB_CS_OFF(skb); /* signed! */
3535
3536 DP(NETIF_MSG_TX_QUEUED,
3537 "hlen %d fix %d csum before fix %x\n",
3538 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3539
3540 /* HW bug: fixup the CSUM */
3541 pbd->tcp_pseudo_csum =
3542 bnx2x_csum_fix(skb_transport_header(skb),
3543 SKB_CS(skb), fix);
3544
3545 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3546 pbd->tcp_pseudo_csum);
3547 }
3548
3549 return hlen;
3550}
f85582f8 3551
a848ade4
DK
3552static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3553 struct eth_tx_parse_bd_e2 *pbd_e2,
3554 struct eth_tx_parse_2nd_bd *pbd2,
3555 u16 *global_data,
3556 u32 xmit_type)
3557{
e287a75c 3558 u16 hlen_w = 0;
a848ade4 3559 u8 outerip_off, outerip_len = 0;
e768fb29 3560
e287a75c
DK
3561 /* from outer IP to transport */
3562 hlen_w = (skb_inner_transport_header(skb) -
3563 skb_network_header(skb)) >> 1;
a848ade4
DK
3564
3565 /* transport len */
e768fb29 3566 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3567
e287a75c 3568 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3569
e768fb29
DK
3570 /* outer IP header info */
3571 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3572 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3573 u32 csum = (__force u32)(~iph->check) -
3574 (__force u32)iph->tot_len -
3575 (__force u32)iph->frag_off;
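 /* Note: starting from the complemented IP header checksum, the tot_len
  * and frag_off words are subtracted back out, giving a checksum that
  * excludes the length and fragment/flags fields (matching the field name
  * below) - presumably because the FW rewrites those fields per segment.
  */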
c957d09f 3576
a848ade4 3577 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3578 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3579 } else {
3580 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3581 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3582 }
3583
3584 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3585
3586 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3587
3588 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3589 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3590
3591 pbd_e2->data.tunnel_data.pseudo_csum =
3592 bswab16(~csum_tcpudp_magic(
3593 inner_ip_hdr(skb)->saddr,
3594 inner_ip_hdr(skb)->daddr,
3595 0, IPPROTO_TCP, 0));
3596
3597 outerip_len = ip_hdr(skb)->ihl << 1;
3598 } else {
3599 pbd_e2->data.tunnel_data.pseudo_csum =
3600 bswab16(~csum_ipv6_magic(
3601 &inner_ipv6_hdr(skb)->saddr,
3602 &inner_ipv6_hdr(skb)->daddr,
3603 0, IPPROTO_TCP, 0));
3604 }
3605
3606 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3607
3608 *global_data |=
3609 outerip_off |
3610 (!!(xmit_type & XMIT_CSUM_V6) <<
3611 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3612 (outerip_len <<
3613 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3614 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3615 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3616
3617 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3618 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3619 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3620 }
a848ade4
DK
3621}
3622
9f6c9258
DK
3623/* called with netif_tx_lock
3624 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3625 * netif_wake_queue()
3626 */
3627netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3628{
3629 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3630
9f6c9258 3631 struct netdev_queue *txq;
6383c0b3 3632 struct bnx2x_fp_txdata *txdata;
9f6c9258 3633 struct sw_tx_bd *tx_buf;
619c5cb6 3634 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3635 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3636 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3637 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3638 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3639 u32 pbd_e2_parsing_data = 0;
9f6c9258 3640 u16 pkt_prod, bd_prod;
65565884 3641 int nbd, txq_index;
9f6c9258
DK
3642 dma_addr_t mapping;
3643 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3644 int i;
3645 u8 hlen = 0;
3646 __le16 pkt_size = 0;
3647 struct ethhdr *eth;
3648 u8 mac_type = UNICAST_ADDRESS;
3649
3650#ifdef BNX2X_STOP_ON_ERROR
3651 if (unlikely(bp->panic))
3652 return NETDEV_TX_BUSY;
3653#endif
3654
6383c0b3
AE
3655 txq_index = skb_get_queue_mapping(skb);
3656 txq = netdev_get_tx_queue(dev, txq_index);
3657
55c11941 3658 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3659
65565884 3660 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3661
3662 /* enable this debug print to view the transmission queue being used
51c1a580 3663 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3664 txq_index, fp_index, txdata_index); */
9f6c9258 3665
16a5fd92 3666 /* enable this debug print to view the transmission details
51c1a580
MS
3667 DP(NETIF_MSG_TX_QUEUED,
3668 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3669 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3670
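 /* Note: the space check below is a worst-case estimate - one BD per
  * fragment, plus the fixed BDs a packet may need (BDS_PER_TX_PKT), plus
  * the next-page BDs such a chain could consume (NEXT_CNT_PER_TX_PKT).
  */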
6383c0b3 3671 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3672 skb_shinfo(skb)->nr_frags +
3673 BDS_PER_TX_PKT +
3674 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3675 /* Handle special storage cases separately */
c96bdc0c
DK
3676 if (txdata->tx_ring_size == 0) {
3677 struct bnx2x_eth_q_stats *q_stats =
3678 bnx2x_fp_qstats(bp, txdata->parent_fp);
3679 q_stats->driver_filtered_tx_pkt++;
3680 dev_kfree_skb(skb);
3681 return NETDEV_TX_OK;
3682 }
2de67439
YM
3683 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3684 netif_tx_stop_queue(txq);
c96bdc0c 3685 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3686
9f6c9258
DK
3687 return NETDEV_TX_BUSY;
3688 }
3689
51c1a580 3690 DP(NETIF_MSG_TX_QUEUED,
04c46736 3691 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3692 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3693 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3694 skb->len);
9f6c9258
DK
3695
3696 eth = (struct ethhdr *)skb->data;
3697
3698 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3699 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3700 if (is_broadcast_ether_addr(eth->h_dest))
3701 mac_type = BROADCAST_ADDRESS;
3702 else
3703 mac_type = MULTICAST_ADDRESS;
3704 }
3705
91226790 3706#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3707 /* First, check if we need to linearize the skb (due to FW
3708 restrictions). No need to check fragmentation if page size > 8K
 3709 (there will be no violation of FW restrictions) */
3710 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3711 /* Statistics of linearization */
3712 bp->lin_cnt++;
3713 if (skb_linearize(skb) != 0) {
51c1a580
MS
3714 DP(NETIF_MSG_TX_QUEUED,
3715 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3716 dev_kfree_skb_any(skb);
3717 return NETDEV_TX_OK;
3718 }
3719 }
3720#endif
619c5cb6
VZ
3721 /* Map skb linear data for DMA */
3722 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3723 skb_headlen(skb), DMA_TO_DEVICE);
3724 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3725 DP(NETIF_MSG_TX_QUEUED,
3726 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3727 dev_kfree_skb_any(skb);
3728 return NETDEV_TX_OK;
3729 }
9f6c9258
DK
3730 /*
3731 Please read carefully. First we use one BD which we mark as start,
3732 then we have a parsing info BD (used for TSO or xsum),
3733 and only then we have the rest of the TSO BDs.
3734 (don't forget to mark the last one as last,
3735 and to unmap only AFTER you write to the BD ...)
 3736 And above all, all PBD sizes are in words - NOT DWORDS!
3737 */
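 /* A rough sketch of the BD chain built below for a typical packet:
  *
  *   start BD -> parse BD (e1x or e2) [-> 2nd parse BD for tunnels]
  *            [-> an extra BD if the linear data is split for TSO]
  *            -> one data BD per fragment
  *
  * nbd tracks the total; it is written into first_bd->nbd once all
  * fragments are mapped, and the doorbell additionally counts a next-page
  * BD when the chain crosses a ring page.
  */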
3738
619c5cb6
VZ
3739 /* get current pkt produced now - advance it just before sending packet
3740 * since mapping of pages may fail and cause packet to be dropped
3741 */
6383c0b3
AE
3742 pkt_prod = txdata->tx_pkt_prod;
3743 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3744
619c5cb6
VZ
3745 /* get a tx_buf and first BD
3746 * tx_start_bd may be changed during SPLIT,
3747 * but first_bd will always stay first
3748 */
6383c0b3
AE
3749 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3750 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3751 first_bd = tx_start_bd;
9f6c9258
DK
3752
3753 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3754
91226790
DK
3755 /* header nbd: indirectly zero other flags! */
3756 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3757
3758 /* remember the first BD of the packet */
6383c0b3 3759 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3760 tx_buf->skb = skb;
3761 tx_buf->flags = 0;
3762
3763 DP(NETIF_MSG_TX_QUEUED,
3764 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3765 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3766
eab6d18d 3767 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3768 tx_start_bd->vlan_or_ethertype =
3769 cpu_to_le16(vlan_tx_tag_get(skb));
3770 tx_start_bd->bd_flags.as_bitfield |=
3771 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3772 } else {
3773 /* when transmitting in a vf, start bd must hold the ethertype
3774 * for fw to enforce it
3775 */
91226790 3776 if (IS_VF(bp))
dc1ba591
AE
3777 tx_start_bd->vlan_or_ethertype =
3778 cpu_to_le16(ntohs(eth->h_proto));
91226790 3779 else
dc1ba591
AE
3780 /* used by FW for packet accounting */
3781 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3782 }
9f6c9258 3783
91226790
DK
3784 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3785
9f6c9258
DK
3786 /* turn on parsing and get a BD */
3787 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3788
93ef5c02
DK
3789 if (xmit_type & XMIT_CSUM)
3790 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3791
619c5cb6 3792 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3793 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3794 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3795
3796 if (xmit_type & XMIT_CSUM_ENC) {
3797 u16 global_data = 0;
3798
3799 /* Set PBD in enc checksum offload case */
3800 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3801 &pbd_e2_parsing_data,
3802 xmit_type);
3803
3804 /* turn on 2nd parsing and get a BD */
3805 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3806
3807 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3808
3809 memset(pbd2, 0, sizeof(*pbd2));
3810
3811 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3812 (skb_inner_network_header(skb) -
3813 skb->data) >> 1;
3814
3815 if (xmit_type & XMIT_GSO_ENC)
3816 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3817 &global_data,
3818 xmit_type);
3819
3820 pbd2->global_data = cpu_to_le16(global_data);
3821
 3822 /* add additional parse BD indication to the start BD */
3823 SET_FLAG(tx_start_bd->general_data,
3824 ETH_TX_START_BD_PARSE_NBDS, 1);
3825 /* set encapsulation flag in start BD */
3826 SET_FLAG(tx_start_bd->general_data,
3827 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3828 nbd++;
3829 } else if (xmit_type & XMIT_CSUM) {
91226790 3830 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3831 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3832 &pbd_e2_parsing_data,
3833 xmit_type);
a848ade4 3834 }
dc1ba591 3835
91226790
DK
 3836 /* Add the MACs to the parsing BD if this is a VF */
3837 if (IS_VF(bp)) {
3838 /* override GRE parameters in BD */
3839 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3840 &pbd_e2->data.mac_addr.src_mid,
3841 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3842 eth->h_source);
91226790
DK
3843
3844 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3845 &pbd_e2->data.mac_addr.dst_mid,
3846 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3847 eth->h_dest);
3848 }
96bed4b9
YM
3849
3850 SET_FLAG(pbd_e2_parsing_data,
3851 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3852 } else {
96bed4b9 3853 u16 global_data = 0;
6383c0b3 3854 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3855 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3856 /* Set PBD in checksum offload case */
3857 if (xmit_type & XMIT_CSUM)
3858 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3859
96bed4b9
YM
3860 SET_FLAG(global_data,
3861 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3862 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3863 }
3864
f85582f8 3865 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3866 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3867 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3868 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3869 pkt_size = tx_start_bd->nbytes;
3870
51c1a580 3871 DP(NETIF_MSG_TX_QUEUED,
91226790 3872 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3873 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3874 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3875 tx_start_bd->bd_flags.as_bitfield,
3876 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3877
3878 if (xmit_type & XMIT_GSO) {
3879
3880 DP(NETIF_MSG_TX_QUEUED,
3881 "TSO packet len %d hlen %d total len %d tso size %d\n",
3882 skb->len, hlen, skb_headlen(skb),
3883 skb_shinfo(skb)->gso_size);
3884
3885 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3886
91226790
DK
3887 if (unlikely(skb_headlen(skb) > hlen)) {
3888 nbd++;
6383c0b3
AE
3889 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3890 &tx_start_bd, hlen,
91226790
DK
3891 bd_prod);
3892 }
619c5cb6 3893 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3894 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3895 xmit_type);
f2e0899f 3896 else
44dbc78e 3897 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3898 }
2297a2da
VZ
3899
3900 /* Set the PBD's parsing_data field if not zero
3901 * (for the chips newer than 57711).
3902 */
3903 if (pbd_e2_parsing_data)
3904 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3905
9f6c9258
DK
3906 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3907
f85582f8 3908 /* Handle fragmented skb */
9f6c9258
DK
3909 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3910 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3911
9e903e08
ED
3912 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3913 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3914 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3915 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3916
51c1a580
MS
3917 DP(NETIF_MSG_TX_QUEUED,
3918 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3919
 3920 /* we need to unmap all buffers already mapped
 3921 * for this SKB;
 3922 * first_bd->nbd needs to be properly updated
 3923 * before the call to bnx2x_free_tx_pkt
3924 */
3925 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3926 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3927 TX_BD(txdata->tx_pkt_prod),
3928 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3929 return NETDEV_TX_OK;
3930 }
3931
9f6c9258 3932 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3933 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3934 if (total_pkt_bd == NULL)
6383c0b3 3935 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3936
9f6c9258
DK
3937 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3938 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3939 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3940 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3941 nbd++;
9f6c9258
DK
3942
3943 DP(NETIF_MSG_TX_QUEUED,
3944 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3945 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3946 le16_to_cpu(tx_data_bd->nbytes));
3947 }
3948
3949 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3950
619c5cb6
VZ
3951 /* update with actual num BDs */
3952 first_bd->nbd = cpu_to_le16(nbd);
3953
9f6c9258
DK
3954 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3955
 3956 /* now send a tx doorbell, counting the next-page BD as well
 3957 * if the packet's chain contains or ends on one
3958 */
3959 if (TX_BD_POFF(bd_prod) < nbd)
3960 nbd++;
3961
619c5cb6
VZ
3962 /* total_pkt_bytes should be set on the first data BD if
3963 * it's not an LSO packet and there is more than one
3964 * data BD. In this case pkt_size is limited by an MTU value.
3965 * However we prefer to set it for an LSO packet (while we don't
 3966 * have to) in order to save some CPU cycles in the non-LSO
 3967 * case, where we care much more about them.
3968 */
9f6c9258
DK
3969 if (total_pkt_bd != NULL)
3970 total_pkt_bd->total_pkt_bytes = pkt_size;
3971
523224a3 3972 if (pbd_e1x)
9f6c9258 3973 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3974 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3975 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3976 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3977 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3978 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3979 if (pbd_e2)
3980 DP(NETIF_MSG_TX_QUEUED,
3981 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3982 pbd_e2,
3983 pbd_e2->data.mac_addr.dst_hi,
3984 pbd_e2->data.mac_addr.dst_mid,
3985 pbd_e2->data.mac_addr.dst_lo,
3986 pbd_e2->data.mac_addr.src_hi,
3987 pbd_e2->data.mac_addr.src_mid,
3988 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3989 pbd_e2->parsing_data);
9f6c9258
DK
3990 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3991
2df1a70a
TH
3992 netdev_tx_sent_queue(txq, skb->len);
3993
8373c57d
WB
3994 skb_tx_timestamp(skb);
3995
6383c0b3 3996 txdata->tx_pkt_prod++;
9f6c9258
DK
3997 /*
3998 * Make sure that the BD data is updated before updating the producer
3999 * since FW might read the BD right after the producer is updated.
4000 * This is only applicable for weak-ordered memory model archs such
 4001 * as IA-64. The following barrier is also mandatory since the FW
 4002 * assumes packets always have BDs.
4003 */
4004 wmb();
4005
6383c0b3 4006 txdata->tx_db.data.prod += nbd;
9f6c9258 4007 barrier();
f85582f8 4008
6383c0b3 4009 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4010
4011 mmiowb();
4012
6383c0b3 4013 txdata->tx_bd_prod += nbd;
9f6c9258 4014
7df2dc6b 4015 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4016 netif_tx_stop_queue(txq);
4017
4018 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4019 * ordering of set_bit() in netif_tx_stop_queue() and read of
4020 * fp->bd_tx_cons */
4021 smp_mb();
4022
15192a8c 4023 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4024 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4025 netif_tx_wake_queue(txq);
4026 }
6383c0b3 4027 txdata->tx_pkt++;
9f6c9258
DK
4028
4029 return NETDEV_TX_OK;
4030}
f85582f8 4031
6383c0b3
AE
4032/**
4033 * bnx2x_setup_tc - routine to configure net_device for multi tc
4034 *
 4035 * @dev: net device to configure
 4036 * @num_tc: number of traffic classes to enable
4037 *
4038 * callback connected to the ndo_setup_tc function pointer
4039 */
4040int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4041{
4042 int cos, prio, count, offset;
4043 struct bnx2x *bp = netdev_priv(dev);
4044
4045 /* setup tc must be called under rtnl lock */
4046 ASSERT_RTNL();
4047
16a5fd92 4048 /* no traffic classes requested. Aborting */
6383c0b3
AE
4049 if (!num_tc) {
4050 netdev_reset_tc(dev);
4051 return 0;
4052 }
4053
4054 /* requested to support too many traffic classes */
4055 if (num_tc > bp->max_cos) {
6bf07b8e 4056 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4057 num_tc, bp->max_cos);
6383c0b3
AE
4058 return -EINVAL;
4059 }
4060
4061 /* declare amount of supported traffic classes */
4062 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4063 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4064 return -EINVAL;
4065 }
4066
4067 /* configure priority to traffic class mapping */
4068 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4069 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4070 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4071 "mapping priority %d to tc %d\n",
6383c0b3
AE
4072 prio, bp->prio_to_cos[prio]);
4073 }
4074
16a5fd92 4075 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
 4076 This can be used for ETS or PFC, and saves the effort of setting
 4077 up a multi-class queueing discipline or negotiating DCBX with a switch
4078 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4079 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4080 for (prio = 1; prio < 16; prio++) {
4081 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4082 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4083 } */
4084
4085 /* configure traffic class to transmission queue mapping */
4086 for (cos = 0; cos < bp->max_cos; cos++) {
4087 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4088 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4089 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4090 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4091 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4092 cos, offset, count);
4093 }
4094
4095 return 0;
4096}
4097
9f6c9258
DK
4098/* called with rtnl_lock */
4099int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4100{
4101 struct sockaddr *addr = p;
4102 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4103 int rc = 0;
9f6c9258 4104
51c1a580
MS
4105 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4106 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4107 return -EINVAL;
51c1a580 4108 }
614c76df 4109
a3348722
BW
4110 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4111 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4112 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4113 return -EINVAL;
51c1a580 4114 }
9f6c9258 4115
619c5cb6
VZ
4116 if (netif_running(dev)) {
4117 rc = bnx2x_set_eth_mac(bp, false);
4118 if (rc)
4119 return rc;
4120 }
4121
9f6c9258 4122 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4123
523224a3 4124 if (netif_running(dev))
619c5cb6 4125 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4126
619c5cb6 4127 return rc;
9f6c9258
DK
4128}
4129
b3b83c3f
DK
4130static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4131{
4132 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4133 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4134 u8 cos;
b3b83c3f
DK
4135
4136 /* Common */
55c11941 4137
b3b83c3f
DK
4138 if (IS_FCOE_IDX(fp_index)) {
4139 memset(sb, 0, sizeof(union host_hc_status_block));
4140 fp->status_blk_mapping = 0;
b3b83c3f 4141 } else {
b3b83c3f 4142 /* status blocks */
619c5cb6 4143 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4144 BNX2X_PCI_FREE(sb->e2_sb,
4145 bnx2x_fp(bp, fp_index,
4146 status_blk_mapping),
4147 sizeof(struct host_hc_status_block_e2));
4148 else
4149 BNX2X_PCI_FREE(sb->e1x_sb,
4150 bnx2x_fp(bp, fp_index,
4151 status_blk_mapping),
4152 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4153 }
55c11941 4154
b3b83c3f
DK
4155 /* Rx */
4156 if (!skip_rx_queue(bp, fp_index)) {
4157 bnx2x_free_rx_bds(fp);
4158
4159 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4160 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4161 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4162 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4163 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4164
4165 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4166 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4167 sizeof(struct eth_fast_path_rx_cqe) *
4168 NUM_RCQ_BD);
4169
4170 /* SGE ring */
4171 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4172 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4173 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4174 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4175 }
4176
4177 /* Tx */
4178 if (!skip_tx_queue(bp, fp_index)) {
4179 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4180 for_each_cos_in_tx_queue(fp, cos) {
65565884 4181 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4182
51c1a580 4183 DP(NETIF_MSG_IFDOWN,
94f05b0f 4184 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4185 fp_index, cos, txdata->cid);
4186
4187 BNX2X_FREE(txdata->tx_buf_ring);
4188 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4189 txdata->tx_desc_mapping,
4190 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4191 }
b3b83c3f
DK
4192 }
4193 /* end of fastpath */
4194}
4195
55c11941
MS
4196void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4197{
4198 int i;
4199 for_each_cnic_queue(bp, i)
4200 bnx2x_free_fp_mem_at(bp, i);
4201}
4202
b3b83c3f
DK
4203void bnx2x_free_fp_mem(struct bnx2x *bp)
4204{
4205 int i;
55c11941 4206 for_each_eth_queue(bp, i)
b3b83c3f
DK
4207 bnx2x_free_fp_mem_at(bp, i);
4208}
4209
1191cb83 4210static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4211{
4212 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4213 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4214 bnx2x_fp(bp, index, sb_index_values) =
4215 (__le16 *)status_blk.e2_sb->sb.index_values;
4216 bnx2x_fp(bp, index, sb_running_index) =
4217 (__le16 *)status_blk.e2_sb->sb.running_index;
4218 } else {
4219 bnx2x_fp(bp, index, sb_index_values) =
4220 (__le16 *)status_blk.e1x_sb->sb.index_values;
4221 bnx2x_fp(bp, index, sb_running_index) =
4222 (__le16 *)status_blk.e1x_sb->sb.running_index;
4223 }
4224}
4225
1191cb83
ED
4226/* Returns the number of actually allocated BDs */
4227static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4228 int rx_ring_size)
4229{
4230 struct bnx2x *bp = fp->bp;
4231 u16 ring_prod, cqe_ring_prod;
4232 int i, failure_cnt = 0;
4233
4234 fp->rx_comp_cons = 0;
4235 cqe_ring_prod = ring_prod = 0;
4236
 4237 /* This routine is called only during init, so
4238 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4239 */
4240 for (i = 0; i < rx_ring_size; i++) {
996dedba 4241 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4242 failure_cnt++;
4243 continue;
4244 }
4245 ring_prod = NEXT_RX_IDX(ring_prod);
4246 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4247 WARN_ON(ring_prod <= (i - failure_cnt));
4248 }
4249
4250 if (failure_cnt)
4251 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4252 i - failure_cnt, fp->index);
4253
4254 fp->rx_bd_prod = ring_prod;
4255 /* Limit the CQE producer by the CQE ring size */
4256 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4257 cqe_ring_prod);
4258 fp->rx_pkt = fp->rx_calls = 0;
4259
15192a8c 4260 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4261
4262 return i - failure_cnt;
4263}
4264
4265static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4266{
4267 int i;
4268
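 /* Note: the last CQE of each RCQ page is used as a next-page pointer;
  * page i links to page (i % NUM_RCQ_RINGS), so the final page wraps
  * back to the first.
  */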
4269 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4270 struct eth_rx_cqe_next_page *nextpg;
4271
4272 nextpg = (struct eth_rx_cqe_next_page *)
4273 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4274 nextpg->addr_hi =
4275 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4276 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4277 nextpg->addr_lo =
4278 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4279 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4280 }
4281}
4282
b3b83c3f
DK
4283static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4284{
4285 union host_hc_status_block *sb;
4286 struct bnx2x_fastpath *fp = &bp->fp[index];
4287 int ring_size = 0;
6383c0b3 4288 u8 cos;
c2188952 4289 int rx_ring_size = 0;
b3b83c3f 4290
a3348722
BW
4291 if (!bp->rx_ring_size &&
4292 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4293 rx_ring_size = MIN_RX_SIZE_NONTPA;
4294 bp->rx_ring_size = rx_ring_size;
55c11941 4295 } else if (!bp->rx_ring_size) {
c2188952
VZ
4296 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4297
065f8b92
YM
4298 if (CHIP_IS_E3(bp)) {
4299 u32 cfg = SHMEM_RD(bp,
4300 dev_info.port_hw_config[BP_PORT(bp)].
4301 default_cfg);
4302
4303 /* Decrease ring size for 1G functions */
4304 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4305 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4306 rx_ring_size /= 10;
4307 }
d760fc37 4308
c2188952
VZ
4309 /* allocate at least number of buffers required by FW */
4310 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4311 MIN_RX_SIZE_TPA, rx_ring_size);
4312
4313 bp->rx_ring_size = rx_ring_size;
614c76df 4314 } else /* if rx_ring_size specified - use it */
c2188952 4315 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4316
04c46736
YM
4317 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4318
b3b83c3f
DK
4319 /* Common */
4320 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4321
b3b83c3f 4322 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4323 /* status blocks */
619c5cb6 4324 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4325 BNX2X_PCI_ALLOC(sb->e2_sb,
4326 &bnx2x_fp(bp, index, status_blk_mapping),
4327 sizeof(struct host_hc_status_block_e2));
4328 else
4329 BNX2X_PCI_ALLOC(sb->e1x_sb,
4330 &bnx2x_fp(bp, index, status_blk_mapping),
4331 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4332 }
8eef2af1
DK
4333
4334 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4335 * set shortcuts for it.
4336 */
4337 if (!IS_FCOE_IDX(index))
4338 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4339
4340 /* Tx */
4341 if (!skip_tx_queue(bp, index)) {
4342 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4343 for_each_cos_in_tx_queue(fp, cos) {
65565884 4344 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4345
51c1a580
MS
4346 DP(NETIF_MSG_IFUP,
4347 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4348 index, cos);
4349
4350 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4351 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4352 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4353 &txdata->tx_desc_mapping,
b3b83c3f 4354 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4355 }
b3b83c3f
DK
4356 }
4357
4358 /* Rx */
4359 if (!skip_rx_queue(bp, index)) {
4360 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4361 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4362 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4363 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4364 &bnx2x_fp(bp, index, rx_desc_mapping),
4365 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4366
75b29459
DK
4367 /* Seed all CQEs by 1s */
4368 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4369 &bnx2x_fp(bp, index, rx_comp_mapping),
4370 sizeof(struct eth_fast_path_rx_cqe) *
4371 NUM_RCQ_BD);
b3b83c3f
DK
4372
4373 /* SGE ring */
4374 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4375 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4376 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4377 &bnx2x_fp(bp, index, rx_sge_mapping),
4378 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4379 /* RX BD ring */
4380 bnx2x_set_next_page_rx_bd(fp);
4381
4382 /* CQ ring */
4383 bnx2x_set_next_page_rx_cq(fp);
4384
4385 /* BDs */
4386 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4387 if (ring_size < rx_ring_size)
4388 goto alloc_mem_err;
4389 }
4390
4391 return 0;
4392
4393/* handles low memory cases */
4394alloc_mem_err:
4395 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4396 index, ring_size);
4397 /* FW will drop all packets if queue is not big enough,
4398 * In these cases we disable the queue
6383c0b3 4399 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4400 */
4401 if (ring_size < (fp->disable_tpa ?
eb722d7a 4402 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4403 /* release memory allocated for this queue */
4404 bnx2x_free_fp_mem_at(bp, index);
4405 return -ENOMEM;
4406 }
4407 return 0;
4408}
4409
55c11941
MS
4410int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4411{
4412 if (!NO_FCOE(bp))
4413 /* FCoE */
4414 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4415 /* we will fail the load process instead of marking
 4416 * NO_FCOE_FLAG
4417 */
4418 return -ENOMEM;
4419
4420 return 0;
4421}
4422
b3b83c3f
DK
4423int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4424{
4425 int i;
4426
55c11941
MS
4427 /* 1. Allocate FP for leading - fatal if error
4428 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4429 */
4430
4431 /* leading */
4432 if (bnx2x_alloc_fp_mem_at(bp, 0))
4433 return -ENOMEM;
6383c0b3 4434
b3b83c3f
DK
4435 /* RSS */
4436 for_each_nondefault_eth_queue(bp, i)
4437 if (bnx2x_alloc_fp_mem_at(bp, i))
4438 break;
4439
4440 /* handle memory failures */
4441 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4442 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4443
4444 WARN_ON(delta < 0);
4864a16a 4445 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4446 if (CNIC_SUPPORT(bp))
 4447 /* move non-eth FPs next to the last eth FP;
 4448 * this must be done in that order:
4449 * FCOE_IDX < FWD_IDX < OOO_IDX
4450 */
b3b83c3f 4451
55c11941
MS
 4452 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4453 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4454 bp->num_ethernet_queues -= delta;
4455 bp->num_queues = bp->num_ethernet_queues +
4456 bp->num_cnic_queues;
b3b83c3f
DK
4457 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4458 bp->num_queues + delta, bp->num_queues);
4459 }
4460
4461 return 0;
4462}
d6214d7a 4463
523224a3
DK
4464void bnx2x_free_mem_bp(struct bnx2x *bp)
4465{
c3146eb6
DK
4466 int i;
4467
4468 for (i = 0; i < bp->fp_array_size; i++)
4469 kfree(bp->fp[i].tpa_info);
523224a3 4470 kfree(bp->fp);
15192a8c
BW
4471 kfree(bp->sp_objs);
4472 kfree(bp->fp_stats);
65565884 4473 kfree(bp->bnx2x_txq);
523224a3
DK
4474 kfree(bp->msix_table);
4475 kfree(bp->ilt);
4476}
4477
0329aba1 4478int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4479{
4480 struct bnx2x_fastpath *fp;
4481 struct msix_entry *tbl;
4482 struct bnx2x_ilt *ilt;
6383c0b3 4483 int msix_table_size = 0;
55c11941 4484 int fp_array_size, txq_array_size;
15192a8c 4485 int i;
6383c0b3
AE
4486
4487 /*
4488 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4489 * path IGU SBs plus default SB (for PF only).
6383c0b3 4490 */
1ab4434c
AE
4491 msix_table_size = bp->igu_sb_cnt;
4492 if (IS_PF(bp))
4493 msix_table_size++;
4494 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4495
6383c0b3 4496 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4497 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4498 bp->fp_array_size = fp_array_size;
4499 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4500
c3146eb6 4501 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4502 if (!fp)
4503 goto alloc_err;
c3146eb6 4504 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4505 fp[i].tpa_info =
4506 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4507 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4508 if (!(fp[i].tpa_info))
4509 goto alloc_err;
4510 }
4511
523224a3
DK
4512 bp->fp = fp;
4513
15192a8c 4514 /* allocate sp objs */
c3146eb6 4515 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4516 GFP_KERNEL);
4517 if (!bp->sp_objs)
4518 goto alloc_err;
4519
4520 /* allocate fp_stats */
c3146eb6 4521 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4522 GFP_KERNEL);
4523 if (!bp->fp_stats)
4524 goto alloc_err;
4525
65565884 4526 /* Allocate memory for the transmission queues array */
55c11941
MS
4527 txq_array_size =
4528 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4529 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4530
4531 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4532 GFP_KERNEL);
65565884
MS
4533 if (!bp->bnx2x_txq)
4534 goto alloc_err;
4535
523224a3 4536 /* msix table */
01e23742 4537 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4538 if (!tbl)
4539 goto alloc_err;
4540 bp->msix_table = tbl;
4541
4542 /* ilt */
4543 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4544 if (!ilt)
4545 goto alloc_err;
4546 bp->ilt = ilt;
4547
4548 return 0;
4549alloc_err:
4550 bnx2x_free_mem_bp(bp);
4551 return -ENOMEM;
523224a3
DK
4552}
4553
a9fccec7 4554int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4555{
4556 struct bnx2x *bp = netdev_priv(dev);
4557
4558 if (unlikely(!netif_running(dev)))
4559 return 0;
4560
5d07d868 4561 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4562 return bnx2x_nic_load(bp, LOAD_NORMAL);
4563}
4564
1ac9e428
YR
4565int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4566{
4567 u32 sel_phy_idx = 0;
4568 if (bp->link_params.num_phys <= 1)
4569 return INT_PHY;
4570
4571 if (bp->link_vars.link_up) {
4572 sel_phy_idx = EXT_PHY1;
4573 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4574 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4575 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4576 sel_phy_idx = EXT_PHY2;
4577 } else {
4578
4579 switch (bnx2x_phy_selection(&bp->link_params)) {
4580 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4581 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4582 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4583 sel_phy_idx = EXT_PHY1;
4584 break;
4585 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4586 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4587 sel_phy_idx = EXT_PHY2;
4588 break;
4589 }
4590 }
4591
4592 return sel_phy_idx;
1ac9e428
YR
4593}
4594int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4595{
4596 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4597 /*
2de67439 4598 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4599 * swapping is enabled). So when swapping is enabled, we need to reverse
4600 * the configuration
4601 */
4602
4603 if (bp->link_params.multi_phy_config &
4604 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4605 if (sel_phy_idx == EXT_PHY1)
4606 sel_phy_idx = EXT_PHY2;
4607 else if (sel_phy_idx == EXT_PHY2)
4608 sel_phy_idx = EXT_PHY1;
4609 }
4610 return LINK_CONFIG_IDX(sel_phy_idx);
4611}
4612
55c11941 4613#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4614int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4615{
4616 struct bnx2x *bp = netdev_priv(dev);
4617 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4618
4619 switch (type) {
4620 case NETDEV_FCOE_WWNN:
4621 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4622 cp->fcoe_wwn_node_name_lo);
4623 break;
4624 case NETDEV_FCOE_WWPN:
4625 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4626 cp->fcoe_wwn_port_name_lo);
4627 break;
4628 default:
51c1a580 4629 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4630 return -EINVAL;
4631 }
4632
4633 return 0;
4634}
4635#endif
4636
9f6c9258
DK
4637/* called with rtnl_lock */
4638int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4639{
4640 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4641
4642 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4643 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4644 return -EAGAIN;
4645 }
4646
4647 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4648 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4649 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4650 return -EINVAL;
51c1a580 4651 }
9f6c9258
DK
4652
4653 /* This does not race with packet allocation
4654 * because the actual alloc size is
4655 * only updated as part of load
4656 */
4657 dev->mtu = new_mtu;
4658
66371c44
MM
4659 return bnx2x_reload_if_running(dev);
4660}
4661
c8f44aff 4662netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4663 netdev_features_t features)
66371c44
MM
4664{
4665 struct bnx2x *bp = netdev_priv(dev);
4666
4667 /* TPA requires Rx CSUM offloading */
621b4d66 4668 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4669 features &= ~NETIF_F_LRO;
621b4d66
DK
4670 features &= ~NETIF_F_GRO;
4671 }
66371c44
MM
4672
4673 return features;
4674}
4675
c8f44aff 4676int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4677{
4678 struct bnx2x *bp = netdev_priv(dev);
4679 u32 flags = bp->flags;
8802f579 4680 u32 changes;
538dd2e3 4681 bool bnx2x_reload = false;
66371c44
MM
4682
4683 if (features & NETIF_F_LRO)
4684 flags |= TPA_ENABLE_FLAG;
4685 else
4686 flags &= ~TPA_ENABLE_FLAG;
4687
621b4d66
DK
4688 if (features & NETIF_F_GRO)
4689 flags |= GRO_ENABLE_FLAG;
4690 else
4691 flags &= ~GRO_ENABLE_FLAG;
4692
538dd2e3
MB
4693 if (features & NETIF_F_LOOPBACK) {
4694 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4695 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4696 bnx2x_reload = true;
4697 }
4698 } else {
4699 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4700 bp->link_params.loopback_mode = LOOPBACK_NONE;
4701 bnx2x_reload = true;
4702 }
4703 }
4704
8802f579
ED
4705 changes = flags ^ bp->flags;
4706
16a5fd92 4707 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4708 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4709 changes &= ~GRO_ENABLE_FLAG;
4710
4711 if (changes)
538dd2e3 4712 bnx2x_reload = true;
8802f579
ED
4713
4714 bp->flags = flags;
66371c44 4715
538dd2e3 4716 if (bnx2x_reload) {
66371c44
MM
4717 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4718 return bnx2x_reload_if_running(dev);
4719 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4720 }
4721
66371c44 4722 return 0;
9f6c9258
DK
4723}
4724
4725void bnx2x_tx_timeout(struct net_device *dev)
4726{
4727 struct bnx2x *bp = netdev_priv(dev);
4728
4729#ifdef BNX2X_STOP_ON_ERROR
4730 if (!bp->panic)
4731 bnx2x_panic();
4732#endif
7be08a72
AE
4733
4734 smp_mb__before_clear_bit();
4735 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4736 smp_mb__after_clear_bit();
4737
9f6c9258 4738 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4739 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4740}
4741
9f6c9258
DK
4742int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4743{
4744 struct net_device *dev = pci_get_drvdata(pdev);
4745 struct bnx2x *bp;
4746
4747 if (!dev) {
4748 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4749 return -ENODEV;
4750 }
4751 bp = netdev_priv(dev);
4752
4753 rtnl_lock();
4754
4755 pci_save_state(pdev);
4756
4757 if (!netif_running(dev)) {
4758 rtnl_unlock();
4759 return 0;
4760 }
4761
4762 netif_device_detach(dev);
4763
5d07d868 4764 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4765
4766 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4767
4768 rtnl_unlock();
4769
4770 return 0;
4771}
4772
4773int bnx2x_resume(struct pci_dev *pdev)
4774{
4775 struct net_device *dev = pci_get_drvdata(pdev);
4776 struct bnx2x *bp;
4777 int rc;
4778
4779 if (!dev) {
4780 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4781 return -ENODEV;
4782 }
4783 bp = netdev_priv(dev);
4784
4785 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4786 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4787 return -EAGAIN;
4788 }
4789
4790 rtnl_lock();
4791
4792 pci_restore_state(pdev);
4793
4794 if (!netif_running(dev)) {
4795 rtnl_unlock();
4796 return 0;
4797 }
4798
4799 bnx2x_set_power_state(bp, PCI_D0);
4800 netif_device_attach(dev);
4801
4802 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4803
4804 rtnl_unlock();
4805
4806 return rc;
4807}
619c5cb6 4808
619c5cb6
VZ
4809void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4810 u32 cid)
4811{
b9871bcf
AE
4812 if (!cxt) {
4813 BNX2X_ERR("bad context pointer %p\n", cxt);
4814 return;
4815 }
4816
619c5cb6
VZ
4817 /* ustorm cxt validation */
4818 cxt->ustorm_ag_context.cdu_usage =
4819 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4820 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4821 /* xcontext validation */
4822 cxt->xstorm_ag_context.cdu_reserved =
4823 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4824 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4825}
4826
1191cb83
ED
4827static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4828 u8 fw_sb_id, u8 sb_index,
4829 u8 ticks)
619c5cb6 4830{
619c5cb6
VZ
4831 u32 addr = BAR_CSTRORM_INTMEM +
4832 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4833 REG_WR8(bp, addr, ticks);
51c1a580
MS
4834 DP(NETIF_MSG_IFUP,
4835 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4836 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4837}
4838
1191cb83
ED
4839static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4840 u16 fw_sb_id, u8 sb_index,
4841 u8 disable)
619c5cb6
VZ
4842{
4843 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4844 u32 addr = BAR_CSTRORM_INTMEM +
4845 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4846 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4847 /* clear and set */
4848 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4849 flags |= enable_flag;
0c14e5ce 4850 REG_WR8(bp, addr, flags);
51c1a580
MS
4851 DP(NETIF_MSG_IFUP,
4852 "port %x fw_sb_id %d sb_index %d disable %d\n",
4853 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4854}
4855
4856void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4857 u8 sb_index, u8 disable, u16 usec)
4858{
4859 int port = BP_PORT(bp);
4860 u8 ticks = usec / BNX2X_BTR;
4861
4862 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4863
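 /* Note: a zero usec timeout is treated as an additional request to
  * disable this index.
  */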
4864 disable = disable ? 1 : (usec ? 0 : 1);
4865 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4866}