bnx2x: Fix configuration of doorbell block
[linux-2.6-block.git] / drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
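/* Note on the TX BD chain walked by bnx2x_free_tx_pkt(): a transmitted
 * packet consists of a start BD (unmapped with dma_unmap_single()), a
 * parse BD and an optional TSO split header BD (neither carries its own
 * DMA mapping), followed by the data/frag BDs (unmapped with
 * dma_unmap_page()).  'nbd' below counts the BDs still to be skipped or
 * unmapped.
 */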
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
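/* For example, an IPv4 aggregation with no TCP timestamps yields
 * hdrs_len = ETH_HLEN (14) + sizeof(struct iphdr) (20) +
 * sizeof(struct tcphdr) (20) = 54, so gso_size = len_on_bd - 54;
 * a timestamped flow adds TPA_TSTAMP_OPT_LEN (12) more.
 */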
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

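/* Hand a completed aggregation to the stack: for FW-coalesced (GRO mode)
 * packets the TCP pseudo-header checksum is rebuilt first (see
 * bnx2x_gro_csum() above) so that tcp_gro_complete() sees a consistent
 * header before the skb reaches napi_gro_receive().
 */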
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
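/* In switch-independent (SI) MF mode maxCfg acts as a percentage of the
 * physical line speed (e.g. maxCfg = 30 on a 10000 Mbps link yields
 * 3000 Mbps), while in switch-dependent (SD) mode it is expressed in
 * units of 100 Mbps and caps the reported speed.
 */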
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

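/* MSI-X table layout requested below: one slowpath/default vector (PF only),
 * one vector for CNIC when supported, then one vector per ETH queue.  For
 * example, a PF with CNIC support and 8 ETH queues asks for 1 + 1 + 8 = 10
 * vectors.
 */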
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

c957d09f 1731static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1732{
1733 int rc = 0;
30a5de77
DK
1734 if (bp->flags & USING_MSIX_FLAG &&
1735 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1736 rc = bnx2x_req_msix_irqs(bp);
1737 if (rc)
1738 return rc;
1739 } else {
619c5cb6
VZ
1740 rc = bnx2x_req_irq(bp);
1741 if (rc) {
1742 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1743 return rc;
1744 }
1745 if (bp->flags & USING_MSI_FLAG) {
1746 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1747 netdev_info(bp->dev, "using MSI IRQ %d\n",
1748 bp->dev->irq);
1749 }
1750 if (bp->flags & USING_MSIX_FLAG) {
1751 bp->dev->irq = bp->msix_table[0].vector;
1752 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1753 bp->dev->irq);
619c5cb6
VZ
1754 }
1755 }
1756
1757 return 0;
1758}
1759
55c11941
MS
1760static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1761{
1762 int i;
1763
8f20aa57
DK
1764 for_each_rx_queue_cnic(bp, i) {
1765 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1766 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1767 }
55c11941
MS
1768}
1769
1191cb83 1770static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1771{
1772 int i;
1773
8f20aa57
DK
1774 for_each_eth_queue(bp, i) {
1775 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1776 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1777 }
9f6c9258
DK
1778}
1779
55c11941
MS
1780static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1781{
1782 int i;
1783
8f20aa57
DK
1784 local_bh_disable();
1785 for_each_rx_queue_cnic(bp, i) {
55c11941 1786 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1787 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1788 mdelay(1);
1789 }
1790 local_bh_enable();
55c11941
MS
1791}
1792
1191cb83 1793static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1794{
1795 int i;
1796
8f20aa57
DK
1797 local_bh_disable();
1798 for_each_eth_queue(bp, i) {
9f6c9258 1799 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1800 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1801 mdelay(1);
1802 }
1803 local_bh_enable();
9f6c9258
DK
1804}
1805
1806void bnx2x_netif_start(struct bnx2x *bp)
1807{
4b7ed897
DK
1808 if (netif_running(bp->dev)) {
1809 bnx2x_napi_enable(bp);
55c11941
MS
1810 if (CNIC_LOADED(bp))
1811 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1812 bnx2x_int_enable(bp);
1813 if (bp->state == BNX2X_STATE_OPEN)
1814 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1815 }
1816}
1817
1818void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1819{
1820 bnx2x_int_disable_sync(bp, disable_hw);
1821 bnx2x_napi_disable(bp);
55c11941
MS
1822 if (CNIC_LOADED(bp))
1823 bnx2x_napi_disable_cnic(bp);
9f6c9258 1824}
9f6c9258 1825
8307fa3e
VZ
1826u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1827{
8307fa3e 1828 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1829
55c11941 1830 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1831 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1832 u16 ether_type = ntohs(hdr->h_proto);
1833
1834 /* Skip VLAN tag if present */
1835 if (ether_type == ETH_P_8021Q) {
1836 struct vlan_ethhdr *vhdr =
1837 (struct vlan_ethhdr *)skb->data;
1838
1839 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1840 }
1841
1842 /* If ethertype is FCoE or FIP - use FCoE ring */
1843 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1844 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1845 }
55c11941 1846
cdb9d6ae 1847 /* select a non-FCoE queue */
ada7c19e 1848 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1849}
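/* Editorial sketch (not part of the driver): a minimal userspace model of the
 * queue selection above -- read the ethertype, skip one optional 802.1Q tag,
 * steer FCoE/FIP frames to a dedicated queue and hash everything else across
 * the regular ethernet queues. The SK_* constants and the flow_hash argument
 * are stand-ins for the kernel helpers, not the driver's real definitions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SK_ETH_P_8021Q 0x8100
#define SK_ETH_P_FCOE  0x8906
#define SK_ETH_P_FIP   0x8914

static unsigned int sketch_select_queue(const uint8_t *frame, size_t len,
					unsigned int num_eth_queues,
					unsigned int fcoe_queue,
					unsigned int flow_hash)
{
	size_t off = 12;		/* ethertype offset in the L2 header */
	unsigned int ethertype;

	if (len < off + 2)
		return 0;
	ethertype = (frame[off] << 8) | frame[off + 1];

	if (ethertype == SK_ETH_P_8021Q && len >= off + 6) {
		off += 4;		/* skip the VLAN tag */
		ethertype = (frame[off] << 8) | frame[off + 1];
	}

	if (ethertype == SK_ETH_P_FCOE || ethertype == SK_ETH_P_FIP)
		return fcoe_queue;	/* storage traffic gets its own ring */

	return flow_hash % num_eth_queues;	/* regular L2 traffic */
}

int main(void)
{
	/* fabricated frame carrying ethertype 0x8906 (FCoE) */
	uint8_t hdr[14] = { [12] = 0x89, [13] = 0x06 };

	printf("queue %u\n", sketch_select_queue(hdr, sizeof(hdr), 8, 24, 7));
	return 0;
}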
1850
d6214d7a
DK
1851void bnx2x_set_num_queues(struct bnx2x *bp)
1852{
96305234 1853 /* RSS queues */
55c11941 1854 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1855
a3348722
BW
1856 /* override in STORAGE SD modes */
1857 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1858 bp->num_ethernet_queues = 1;
1859
ec6ba945 1860 /* Add special queues */
55c11941
MS
1861 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1862 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1863
1864 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1865}
1866
cdb9d6ae
VZ
1867/**
1868 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1869 *
1870 * @bp: Driver handle
1871 *
1872 * We currently support at most 16 Tx queues for each CoS, thus we will
1873 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1874 * bp->max_cos.
1875 *
1876 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1877 * index after all ETH L2 indices.
1878 *
1879 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1880 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1881 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1882 *
1883 * The proper configuration of skb->queue_mapping is handled by
1884 * bnx2x_select_queue() and __skb_tx_hash().
1885 *
1886 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1887 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1888 */
55c11941 1889static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1890{
6383c0b3 1891 int rc, tx, rx;
ec6ba945 1892
65565884 1893 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1894 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1895
6383c0b3 1896/* account for fcoe queue */
55c11941
MS
1897 if (include_cnic && !NO_FCOE(bp)) {
1898 rx++;
1899 tx++;
6383c0b3 1900 }
6383c0b3
AE
1901
1902 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1903 if (rc) {
1904 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1905 return rc;
1906 }
1907 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1908 if (rc) {
1909 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1910 return rc;
1911 }
1912
51c1a580 1913 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1914 tx, rx);
1915
ec6ba945
VZ
1916 return rc;
1917}
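/* Editorial sketch: the arithmetic used just above when sizing the netdev
 * queue arrays -- each ethernet queue contributes max_cos Tx rings, the Rx
 * side gets one ring per ethernet queue, and an extra Tx/Rx pair is added
 * for the FCoE L2 ring when CNIC support is included. Plain userspace code,
 * for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	unsigned int num_eth_queues = 8, max_cos = 3;
	bool include_fcoe = true;

	unsigned int tx = num_eth_queues * max_cos;
	unsigned int rx = num_eth_queues;

	if (include_fcoe) {
		tx++;			/* FCoE L2 Tx ring */
		rx++;			/* FCoE L2 Rx ring */
	}

	/* with 8 RSS queues and 3 CoS this prints "tx 25 rx 9" */
	printf("tx %u rx %u\n", tx, rx);
	return 0;
}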
1918
1191cb83 1919static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1920{
1921 int i;
1922
1923 for_each_queue(bp, i) {
1924 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1925 u32 mtu;
a8c94b91
VZ
1926
1927 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1928 if (IS_FCOE_IDX(i))
1929 /*
1930 * Although there are no IP frames expected to arrive to
1931 * this ring we still want to add an
1932 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1933 * overrun attack.
1934 */
e52fcb24 1935 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1936 else
e52fcb24
ED
1937 mtu = bp->dev->mtu;
1938 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1939 IP_HEADER_ALIGNMENT_PADDING +
1940 ETH_OVREHEAD +
1941 mtu +
1942 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1943 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1944 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1945 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1946 else
1947 fp->rx_frag_size = 0;
a8c94b91
VZ
1948 }
1949}
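/* Editorial sketch: how fp->rx_buf_size and fp->rx_frag_size relate in the
 * loop above. The alignment/overhead constants below are made-up placeholder
 * values chosen only to show the shape of the computation; the real driver
 * derives them from its own macros.
 */
#include <stdio.h>

#define SK_FW_RX_ALIGN_START	64	/* placeholder */
#define SK_FW_RX_ALIGN_END	64	/* placeholder */
#define SK_IP_ALIGN_PAD		2	/* placeholder */
#define SK_ETH_OVERHEAD		22	/* placeholder for L2 header + VLAN + FCS */
#define SK_NET_SKB_PAD		64	/* placeholder */
#define SK_PAGE_SIZE		4096

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int rx_buf_size = SK_FW_RX_ALIGN_START + SK_IP_ALIGN_PAD +
				   SK_ETH_OVERHEAD + mtu + SK_FW_RX_ALIGN_END;
	/* frags are only used when the whole buffer (plus skb padding)
	 * still fits into a single page; otherwise rx_frag_size is 0 and
	 * the driver falls back to full page allocations.
	 */
	unsigned int rx_frag_size =
		(rx_buf_size + SK_NET_SKB_PAD <= SK_PAGE_SIZE) ?
		rx_buf_size + SK_NET_SKB_PAD : 0;

	printf("rx_buf_size %u rx_frag_size %u\n", rx_buf_size, rx_frag_size);
	return 0;
}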
1950
60cad4e6 1951static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1952{
1953 int i;
619c5cb6
VZ
1954 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1955
16a5fd92 1956 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
1957 * enabled
1958 */
5d317c6a
MS
1959 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1960 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1961 bp->fp->cl_id +
1962 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1963
1964 /*
1965 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1966 * per-port, so if explicit configuration is needed, do it only
1967 * for a PMF.
1968 *
1969 * For 57712 and newer on the other hand it's a per-function
1970 * configuration.
1971 */
5d317c6a 1972 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1973}
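/* Editorial sketch: the indirection-table fill performed above. The ethtool
 * default helper spreads table entries round-robin over the Rx rings, and the
 * driver then biases each entry by the client id of queue 0. Table size and
 * cl_id below are arbitrary example values, not the driver's constants.
 */
#include <stdio.h>

#define SK_IND_TABLE_SIZE 128	/* example size */

int main(void)
{
	unsigned char ind_table[SK_IND_TABLE_SIZE];
	unsigned int num_eth_queues = 4, cl_id = 16, i;

	for (i = 0; i < SK_IND_TABLE_SIZE; i++)
		ind_table[i] = cl_id + (i % num_eth_queues);

	/* entries cycle 16,17,18,19,16,17,... */
	for (i = 0; i < 8; i++)
		printf("%d ", ind_table[i]);
	printf("\n");
	return 0;
}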
1974
60cad4e6
AE
1975int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1976 bool config_hash, bool enable)
619c5cb6 1977{
3b603066 1978 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1979
1980 /* Although RSS is meaningless when there is a single HW queue we
1981 * still need it enabled in order to have HW Rx hash generated.
1982 *
1983 * if (!is_eth_multi(bp))
1984 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1985 */
1986
96305234 1987 params.rss_obj = rss_obj;
619c5cb6
VZ
1988
1989 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1990
60cad4e6
AE
1991 if (enable) {
1992 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1993
1994 /* RSS configuration */
1995 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1996 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1997 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1998 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1999 if (rss_obj->udp_rss_v4)
2000 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2001 if (rss_obj->udp_rss_v6)
2002 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2003 } else {
2004 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2005 }
619c5cb6 2006
96305234
DK
2007 /* Hash bits */
2008 params.rss_result_mask = MULTI_MASK;
619c5cb6 2009
5d317c6a 2010 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2011
96305234
DK
2012 if (config_hash) {
2013 /* RSS keys */
60cad4e6 2014 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2015 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2016 }
2017
60cad4e6
AE
2018 if (IS_PF(bp))
2019 return bnx2x_config_rss(bp, &params);
2020 else
2021 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2022}
2023
1191cb83 2024static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2025{
3b603066 2026 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2027
2028 /* Prepare parameters for function state transitions */
2029 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2030
2031 func_params.f_obj = &bp->func_obj;
2032 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2033
2034 func_params.params.hw_init.load_phase = load_code;
2035
2036 return bnx2x_func_state_change(bp, &func_params);
2037}
2038
2039/*
2040 * Cleans the objects that have internal lists without sending
16a5fd92 2041 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2042 */
7fa6f340 2043void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2044{
2045 int rc;
2046 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2047 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2048 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2049
2050 /***************** Cleanup MACs' object first *************************/
2051
2052 /* Wait for completion of requested commands */
2053 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2054 /* Perform a dry cleanup */
2055 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2056
2057 /* Clean ETH primary MAC */
2058 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2059 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2060 &ramrod_flags);
2061 if (rc != 0)
2062 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2063
2064 /* Cleanup UC list */
2065 vlan_mac_flags = 0;
2066 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2067 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2068 &ramrod_flags);
2069 if (rc != 0)
2070 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2071
2072 /***************** Now clean mcast object *****************************/
2073 rparam.mcast_obj = &bp->mcast_obj;
2074 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2075
8b09be5f
YM
2076 /* Add a DEL command... - Since we're doing a driver cleanup only,
2077 * we take a lock surrounding both the initial send and the CONTs,
2078 * as we don't want a true completion to disrupt us in the middle.
2079 */
2080 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2081 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2082 if (rc < 0)
51c1a580
MS
2083 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2084 rc);
619c5cb6
VZ
2085
2086 /* ...and wait until all pending commands are cleared */
2087 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2088 while (rc != 0) {
2089 if (rc < 0) {
2090 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2091 rc);
8b09be5f 2092 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2093 return;
2094 }
2095
2096 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2097 }
8b09be5f 2098 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2099}
2100
2101#ifndef BNX2X_STOP_ON_ERROR
2102#define LOAD_ERROR_EXIT(bp, label) \
2103 do { \
2104 (bp)->state = BNX2X_STATE_ERROR; \
2105 goto label; \
2106 } while (0)
55c11941
MS
2107
2108#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2109 do { \
2110 bp->cnic_loaded = false; \
2111 goto label; \
2112 } while (0)
2113#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2114#define LOAD_ERROR_EXIT(bp, label) \
2115 do { \
2116 (bp)->state = BNX2X_STATE_ERROR; \
2117 (bp)->panic = 1; \
2118 return -EBUSY; \
2119 } while (0)
55c11941
MS
2120#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2121 do { \
2122 bp->cnic_loaded = false; \
2123 (bp)->panic = 1; \
2124 return -EBUSY; \
2125 } while (0)
2126#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2127
ad5afc89
AE
2128static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2129{
2130 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2131 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2132 return;
2133}
2134
2135static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2136{
8db573ba 2137 int num_groups, vf_headroom = 0;
ad5afc89 2138 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2139
ad5afc89
AE
2140 /* number of queues for statistics is number of eth queues + FCoE */
2141 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2142
ad5afc89
AE
2143 /* Total number of FW statistics requests =
2144 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2145 * and fcoe l2 queue) stats + num of queues (which includes another 1
2146 * for fcoe l2 queue if applicable)
2147 */
2148 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2149
8db573ba
AE
2150 /* vf stats appear in the request list, but their data is allocated by
2151 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2152 * it is used to determine where to place the vf stats queries in the
2153 * request struct
2154 */
2155 if (IS_SRIOV(bp))
6411280a 2156 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2157
ad5afc89
AE
2158 /* Request is built from stats_query_header and an array of
2159 * stats_query_cmd_group each of which contains
2160 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2161 * configured in the stats_query_header.
2162 */
2163 num_groups =
8db573ba
AE
2164 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2165 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2166 1 : 0));
2167
8db573ba
AE
2168 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2169 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2170 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2171 num_groups * sizeof(struct stats_query_cmd_group);
2172
2173 /* Data for statistics requests + stats_counter
2174 * stats_counter holds per-STORM counters that are incremented
2175 * when STORM has finished with the current request.
2176 * Memory for FCoE offloaded statistics is counted anyway,
2177 * even if they will not be sent.
2178 * VF stats are not accounted for here as the data of VF stats is stored
2179 * in memory allocated by the VF, not here.
2180 */
2181 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2182 sizeof(struct per_pf_stats) +
2183 sizeof(struct fcoe_statistics_params) +
2184 sizeof(struct per_queue_stats) * num_queue_stats +
2185 sizeof(struct stats_counter);
2186
2187 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2188 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2189
2190 /* Set shortcuts */
2191 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2192 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2193 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2194 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2195 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2196 bp->fw_stats_req_sz;
2197
6bf07b8e 2198 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2199 U64_HI(bp->fw_stats_req_mapping),
2200 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2201 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2202 U64_HI(bp->fw_stats_data_mapping),
2203 U64_LO(bp->fw_stats_data_mapping));
2204 return 0;
2205
2206alloc_mem_err:
2207 bnx2x_free_fw_stats_mem(bp);
2208 BNX2X_ERR("Can't allocate FW stats memory\n");
2209 return -ENOMEM;
2210}
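/* Editorial sketch: the group-count arithmetic used above. The request is a
 * header plus ceil((fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT)
 * command groups; the counts and the group capacity below are example
 * numbers, only the round-up division is the point.
 */
#include <stdio.h>

#define SK_STATS_QUERY_CMD_COUNT 16	/* placeholder group capacity */

int main(void)
{
	unsigned int fw_stats_num = 2 + 1 + 9;	/* 2 (port+pf) + fcoe + 9 queue stats */
	unsigned int vf_headroom = 4;		/* example VF reservation */
	unsigned int total = fw_stats_num + vf_headroom;

	/* classic round-up division: one extra group for any remainder */
	unsigned int num_groups = total / SK_STATS_QUERY_CMD_COUNT +
				  ((total % SK_STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("requests %u -> %u command group(s)\n", total, num_groups);
	return 0;
}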
2211
2212/* send load request to mcp and analyze response */
2213static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2214{
178135c1
DK
2215 u32 param;
2216
ad5afc89
AE
2217 /* init fw_seq */
2218 bp->fw_seq =
2219 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2220 DRV_MSG_SEQ_NUMBER_MASK);
2221 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2222
2223 /* Get current FW pulse sequence */
2224 bp->fw_drv_pulse_wr_seq =
2225 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2226 DRV_PULSE_SEQ_MASK);
2227 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2228
178135c1
DK
2229 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2230
2231 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2232 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2233
ad5afc89 2234 /* load request */
178135c1 2235 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2236
2237 /* if mcp fails to respond we must abort */
2238 if (!(*load_code)) {
2239 BNX2X_ERR("MCP response failure, aborting\n");
2240 return -EBUSY;
2241 }
2242
2243 /* If mcp refused (e.g. other port is in diagnostic mode) we
2244 * must abort
2245 */
2246 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2247 BNX2X_ERR("MCP refused load request, aborting\n");
2248 return -EBUSY;
2249 }
2250 return 0;
2251}
2252
2253/* check whether another PF has already loaded FW to chip. In
2254 * virtualized environments a pf from another VM may have already
2255 * initialized the device including loading FW
2256 */
2257int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2258{
2259 /* is another pf loaded on this engine? */
2260 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2261 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2262 /* build my FW version dword */
2263 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2264 (BCM_5710_FW_MINOR_VERSION << 8) +
2265 (BCM_5710_FW_REVISION_VERSION << 16) +
2266 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2267
2268 /* read loaded FW from chip */
2269 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2270
2271 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2272 loaded_fw, my_fw);
2273
2274 /* abort nic load if version mismatch */
2275 if (my_fw != loaded_fw) {
6bf07b8e 2276 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
452427b0 2277 loaded_fw, my_fw);
ad5afc89
AE
2278 return -EBUSY;
2279 }
2280 }
2281 return 0;
2282}
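/* Editorial sketch: the version-dword comparison done above. The four
 * firmware version components are packed into one 32-bit word (major in the
 * low byte, engineering version in the high byte) so that "my" firmware can
 * be checked against what another PF already loaded with a single compare.
 * The version numbers below are arbitrary examples.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_fw_version(uint32_t major, uint32_t minor,
				uint32_t rev, uint32_t eng)
{
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}

int main(void)
{
	uint32_t my_fw = pack_fw_version(7, 8, 19, 0);
	uint32_t loaded_fw = pack_fw_version(7, 8, 17, 0);

	if (my_fw != loaded_fw)
		printf("mismatch: loaded %#x vs mine %#x -> abort load\n",
		       loaded_fw, my_fw);
	return 0;
}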
2283
2284/* returns the "mcp load_code" according to global load_count array */
2285static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2286{
2287 int path = BP_PATH(bp);
2288
2289 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2290 path, load_count[path][0], load_count[path][1],
2291 load_count[path][2]);
2292 load_count[path][0]++;
2293 load_count[path][1 + port]++;
2294 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2295 path, load_count[path][0], load_count[path][1],
2296 load_count[path][2]);
2297 if (load_count[path][0] == 1)
2298 return FW_MSG_CODE_DRV_LOAD_COMMON;
2299 else if (load_count[path][1 + port] == 1)
2300 return FW_MSG_CODE_DRV_LOAD_PORT;
2301 else
2302 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2303}
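/* Editorial sketch: the no-MCP bookkeeping above. Each path keeps three
 * counters -- total loads, loads on port 0, loads on port 1 -- so the first
 * function on the path plays the "common" init role, the first on its port
 * the "port" role, and everyone else only does per-function init. Return
 * values are stand-in enums, not the FW_MSG_CODE_* constants.
 */
#include <stdio.h>

enum sk_load_role { SK_LOAD_COMMON, SK_LOAD_PORT, SK_LOAD_FUNCTION };

static int sk_load_count[2][3];	/* [path][0]=total, [1]=port0, [2]=port1 */

static enum sk_load_role sk_nic_load_no_mcp(int path, int port)
{
	sk_load_count[path][0]++;
	sk_load_count[path][1 + port]++;

	if (sk_load_count[path][0] == 1)
		return SK_LOAD_COMMON;
	else if (sk_load_count[path][1 + port] == 1)
		return SK_LOAD_PORT;
	else
		return SK_LOAD_FUNCTION;
}

int main(void)
{
	/* three functions coming up on path 0: ports 0, 1, 0 */
	enum sk_load_role a = sk_nic_load_no_mcp(0, 0);	/* COMMON   (0) */
	enum sk_load_role b = sk_nic_load_no_mcp(0, 1);	/* PORT     (1) */
	enum sk_load_role c = sk_nic_load_no_mcp(0, 0);	/* FUNCTION (2) */

	printf("%d %d %d\n", a, b, c);
	return 0;
}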
2304
2305/* mark PMF if applicable */
2306static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2307{
2308 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2309 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2310 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2311 bp->port.pmf = 1;
2312 /* We need the barrier to ensure the ordering between the
2313 * writing to bp->port.pmf here and reading it from the
2314 * bnx2x_periodic_task().
2315 */
2316 smp_mb();
2317 } else {
2318 bp->port.pmf = 0;
452427b0
YM
2319 }
2320
ad5afc89
AE
2321 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2322}
2323
2324static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2325{
2326 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2327 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2328 (bp->common.shmem2_base)) {
2329 if (SHMEM2_HAS(bp, dcc_support))
2330 SHMEM2_WR(bp, dcc_support,
2331 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2332 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2333 if (SHMEM2_HAS(bp, afex_driver_support))
2334 SHMEM2_WR(bp, afex_driver_support,
2335 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2336 }
2337
2338 /* Set AFEX default VLAN tag to an invalid value */
2339 bp->afex_def_vlan_tag = -1;
452427b0
YM
2340}
2341
1191cb83
ED
2342/**
2343 * bnx2x_bz_fp - zero content of the fastpath structure.
2344 *
2345 * @bp: driver handle
2346 * @index: fastpath index to be zeroed
2347 *
2348 * Makes sure the contents of the bp->fp[index].napi is kept
2349 * intact.
2350 */
2351static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2352{
2353 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2354 int cos;
1191cb83 2355 struct napi_struct orig_napi = fp->napi;
15192a8c 2356 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2357
1191cb83 2358 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2359 if (fp->tpa_info)
2360 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2361 sizeof(struct bnx2x_agg_info));
2362 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2363
2364 /* Restore the NAPI object as it has been already initialized */
2365 fp->napi = orig_napi;
15192a8c 2366 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2367 fp->bp = bp;
2368 fp->index = index;
2369 if (IS_ETH_FP(fp))
2370 fp->max_cos = bp->max_cos;
2371 else
2372 /* Special queues support only one CoS */
2373 fp->max_cos = 1;
2374
65565884 2375 /* Init txdata pointers */
65565884
MS
2376 if (IS_FCOE_FP(fp))
2377 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2378 if (IS_ETH_FP(fp))
2379 for_each_cos_in_tx_queue(fp, cos)
2380 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2381 BNX2X_NUM_ETH_QUEUES(bp) + index];
2382
16a5fd92 2383 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2384 * minimal size so it must be set prior to queue memory allocation
2385 */
2386 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2387 (bp->flags & GRO_ENABLE_FLAG &&
2388 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2389 if (bp->flags & TPA_ENABLE_FLAG)
2390 fp->mode = TPA_MODE_LRO;
2391 else if (bp->flags & GRO_ENABLE_FLAG)
2392 fp->mode = TPA_MODE_GRO;
2393
1191cb83
ED
2394 /* We don't want TPA on an FCoE L2 ring */
2395 if (IS_FCOE_FP(fp))
2396 fp->disable_tpa = 1;
55c11941
MS
2397}
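/* Editorial sketch: the save/clear/restore pattern used by bnx2x_bz_fp()
 * above -- members that are initialized only once (modelled here by a single
 * "napi" field) are copied aside, the whole structure is zeroed, and the
 * preserved members are written back. Types are illustrative only.
 */
#include <string.h>
#include <stdio.h>

struct sk_napi { int id; };

struct sk_fastpath {
	struct sk_napi napi;	/* must survive the re-init */
	int index;
	int rx_ring_fill;	/* stands in for all the state being reset */
};

static void sk_bz_fp(struct sk_fastpath *fp, int index)
{
	struct sk_napi saved_napi = fp->napi;	/* keep the long-lived part */

	memset(fp, 0, sizeof(*fp));		/* wipe everything else */

	fp->napi = saved_napi;			/* put it back */
	fp->index = index;
}

int main(void)
{
	struct sk_fastpath fp = { .napi = { .id = 42 }, .index = 3,
				  .rx_ring_fill = 100 };

	sk_bz_fp(&fp, 3);
	printf("napi %d index %d fill %d\n", fp.napi.id, fp.index,
	       fp.rx_ring_fill);	/* napi 42 index 3 fill 0 */
	return 0;
}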
2398
2399int bnx2x_load_cnic(struct bnx2x *bp)
2400{
2401 int i, rc, port = BP_PORT(bp);
2402
2403 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2404
2405 mutex_init(&bp->cnic_mutex);
2406
ad5afc89
AE
2407 if (IS_PF(bp)) {
2408 rc = bnx2x_alloc_mem_cnic(bp);
2409 if (rc) {
2410 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2411 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2412 }
55c11941
MS
2413 }
2414
2415 rc = bnx2x_alloc_fp_mem_cnic(bp);
2416 if (rc) {
2417 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2418 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2419 }
2420
2421 /* Update the number of queues with the cnic queues */
2422 rc = bnx2x_set_real_num_queues(bp, 1);
2423 if (rc) {
2424 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2425 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2426 }
2427
2428 /* Add all CNIC NAPI objects */
2429 bnx2x_add_all_napi_cnic(bp);
2430 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2431 bnx2x_napi_enable_cnic(bp);
2432
2433 rc = bnx2x_init_hw_func_cnic(bp);
2434 if (rc)
2435 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2436
2437 bnx2x_nic_init_cnic(bp);
2438
ad5afc89
AE
2439 if (IS_PF(bp)) {
2440 /* Enable Timer scan */
2441 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2442
2443 /* setup cnic queues */
2444 for_each_cnic_queue(bp, i) {
2445 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2446 if (rc) {
2447 BNX2X_ERR("Queue setup failed\n");
2448 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2449 }
55c11941
MS
2450 }
2451 }
2452
2453 /* Initialize Rx filter. */
8b09be5f 2454 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2455
2456 /* re-read iscsi info */
2457 bnx2x_get_iscsi_info(bp);
2458 bnx2x_setup_cnic_irq_info(bp);
2459 bnx2x_setup_cnic_info(bp);
2460 bp->cnic_loaded = true;
2461 if (bp->state == BNX2X_STATE_OPEN)
2462 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2463
55c11941
MS
2464 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2465
2466 return 0;
2467
2468#ifndef BNX2X_STOP_ON_ERROR
2469load_error_cnic2:
2470 /* Disable Timer scan */
2471 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2472
2473load_error_cnic1:
2474 bnx2x_napi_disable_cnic(bp);
2475 /* Update the number of queues without the cnic queues */
2476 rc = bnx2x_set_real_num_queues(bp, 0);
2477 if (rc)
2478 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2479load_error_cnic0:
2480 BNX2X_ERR("CNIC-related load failed\n");
2481 bnx2x_free_fp_mem_cnic(bp);
2482 bnx2x_free_mem_cnic(bp);
2483 return rc;
2484#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2485}
2486
9f6c9258
DK
2487/* must be called with rtnl_lock */
2488int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2489{
619c5cb6 2490 int port = BP_PORT(bp);
ad5afc89 2491 int i, rc = 0, load_code = 0;
9f6c9258 2492
55c11941
MS
2493 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2494 DP(NETIF_MSG_IFUP,
2495 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2496
9f6c9258 2497#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2498 if (unlikely(bp->panic)) {
2499 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2500 return -EPERM;
51c1a580 2501 }
9f6c9258
DK
2502#endif
2503
2504 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2505
16a5fd92 2506 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2507 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2508 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2509 &bp->last_reported_link.link_report_flags);
2ae17f66 2510
ad5afc89
AE
2511 if (IS_PF(bp))
2512 /* must be called before memory allocation and HW init */
2513 bnx2x_ilt_set_info(bp);
523224a3 2514
6383c0b3
AE
2515 /*
2516 * Zero fastpath structures preserving invariants like napi, which are
2517 * allocated only once, fp index, max_cos, bp pointer.
65565884 2518 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2519 */
51c1a580 2520 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2521 for_each_queue(bp, i)
2522 bnx2x_bz_fp(bp, i);
55c11941
MS
2523 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2524 bp->num_cnic_queues) *
2525 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2526
55c11941 2527 bp->fcoe_init = false;
6383c0b3 2528
a8c94b91
VZ
2529 /* Set the receive queues buffer size */
2530 bnx2x_set_rx_buf_size(bp);
2531
ad5afc89
AE
2532 if (IS_PF(bp)) {
2533 rc = bnx2x_alloc_mem(bp);
2534 if (rc) {
2535 BNX2X_ERR("Unable to allocate bp memory\n");
2536 return rc;
2537 }
2538 }
2539
2540 /* Allocated memory for FW statistics */
2541 if (bnx2x_alloc_fw_stats_mem(bp))
2542 LOAD_ERROR_EXIT(bp, load_error0);
2543
2544 /* need to be done after alloc mem, since it's self adjusting to amount
2545 * of memory available for RSS queues
2546 */
2547 rc = bnx2x_alloc_fp_mem(bp);
2548 if (rc) {
2549 BNX2X_ERR("Unable to allocate memory for fps\n");
2550 LOAD_ERROR_EXIT(bp, load_error0);
2551 }
d6214d7a 2552
8d9ac297
AE
2553 /* request pf to initialize status blocks */
2554 if (IS_VF(bp)) {
2555 rc = bnx2x_vfpf_init(bp);
2556 if (rc)
2557 LOAD_ERROR_EXIT(bp, load_error0);
2558 }
2559
b3b83c3f
DK
2560 /* As long as bnx2x_alloc_mem() may possibly update
2561 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2562 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2563 */
55c11941 2564 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2565 if (rc) {
ec6ba945 2566 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2567 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2568 }
2569
6383c0b3 2570 /* configure multi cos mappings in kernel.
16a5fd92
YM
2571 * this configuration may be overridden by a multi class queue
2572 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2573 */
2574 bnx2x_setup_tc(bp->dev, bp->max_cos);
2575
26614ba5
MS
2576 /* Add all NAPI objects */
2577 bnx2x_add_all_napi(bp);
55c11941 2578 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2579 bnx2x_napi_enable(bp);
2580
ad5afc89
AE
2581 if (IS_PF(bp)) {
2582 /* set pf load just before approaching the MCP */
2583 bnx2x_set_pf_load(bp);
2584
2585 /* if mcp exists send load request and analyze response */
2586 if (!BP_NOMCP(bp)) {
2587 /* attempt to load pf */
2588 rc = bnx2x_nic_load_request(bp, &load_code);
2589 if (rc)
2590 LOAD_ERROR_EXIT(bp, load_error1);
2591
2592 /* what did mcp say? */
2593 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2594 if (rc) {
2595 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2596 LOAD_ERROR_EXIT(bp, load_error2);
2597 }
ad5afc89
AE
2598 } else {
2599 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2600 }
9f6c9258 2601
ad5afc89
AE
2602 /* mark pmf if applicable */
2603 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2604
ad5afc89
AE
2605 /* Init Function state controlling object */
2606 bnx2x__init_func_obj(bp);
6383c0b3 2607
ad5afc89
AE
2608 /* Initialize HW */
2609 rc = bnx2x_init_hw(bp, load_code);
2610 if (rc) {
2611 BNX2X_ERR("HW init failed, aborting\n");
2612 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2613 LOAD_ERROR_EXIT(bp, load_error2);
2614 }
9f6c9258
DK
2615 }
2616
ecf01c22
YM
2617 bnx2x_pre_irq_nic_init(bp);
2618
d6214d7a
DK
2619 /* Connect to IRQs */
2620 rc = bnx2x_setup_irqs(bp);
523224a3 2621 if (rc) {
ad5afc89
AE
2622 BNX2X_ERR("setup irqs failed\n");
2623 if (IS_PF(bp))
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2625 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2626 }
2627
619c5cb6 2628 /* Init per-function objects */
ad5afc89 2629 if (IS_PF(bp)) {
ecf01c22
YM
2630 /* Setup NIC internals and enable interrupts */
2631 bnx2x_post_irq_nic_init(bp, load_code);
2632
ad5afc89 2633 bnx2x_init_bp_objs(bp);
b56e9670 2634 bnx2x_iov_nic_init(bp);
a3348722 2635
ad5afc89
AE
2636 /* Set AFEX default VLAN tag to an invalid value */
2637 bp->afex_def_vlan_tag = -1;
2638 bnx2x_nic_load_afex_dcc(bp, load_code);
2639 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2640 rc = bnx2x_func_start(bp);
2641 if (rc) {
2642 BNX2X_ERR("Function start failed!\n");
2643 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2644
619c5cb6 2645 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2646 }
9f6c9258 2647
ad5afc89
AE
2648 /* Send LOAD_DONE command to MCP */
2649 if (!BP_NOMCP(bp)) {
2650 load_code = bnx2x_fw_command(bp,
2651 DRV_MSG_CODE_LOAD_DONE, 0);
2652 if (!load_code) {
2653 BNX2X_ERR("MCP response failure, aborting\n");
2654 rc = -EBUSY;
2655 LOAD_ERROR_EXIT(bp, load_error3);
2656 }
2657 }
9f6c9258 2658
0c14e5ce
AE
2659 /* initialize FW coalescing state machines in RAM */
2660 bnx2x_update_coalesce(bp);
60cad4e6 2661 }
0c14e5ce 2662
60cad4e6
AE
2663 /* setup the leading queue */
2664 rc = bnx2x_setup_leading(bp);
2665 if (rc) {
2666 BNX2X_ERR("Setup leading failed!\n");
2667 LOAD_ERROR_EXIT(bp, load_error3);
2668 }
ad5afc89 2669
60cad4e6
AE
2670 /* set up the rest of the queues */
2671 for_each_nondefault_eth_queue(bp, i) {
2672 if (IS_PF(bp))
2673 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2674 else /* VF */
2675 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2676 if (rc) {
60cad4e6 2677 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2678 LOAD_ERROR_EXIT(bp, load_error3);
2679 }
60cad4e6 2680 }
8d9ac297 2681
60cad4e6
AE
2682 /* setup rss */
2683 rc = bnx2x_init_rss(bp);
2684 if (rc) {
2685 BNX2X_ERR("PF RSS init failed\n");
2686 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2687 }
619c5cb6 2688
523224a3
DK
2689 /* Now when Clients are configured we are ready to work */
2690 bp->state = BNX2X_STATE_OPEN;
2691
619c5cb6 2692 /* Configure a ucast MAC */
ad5afc89
AE
2693 if (IS_PF(bp))
2694 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2695 else /* vf */
f8f4f61a
DK
2696 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2697 true);
51c1a580
MS
2698 if (rc) {
2699 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2700 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2701 }
6e30dd4e 2702
ad5afc89 2703 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2704 bnx2x_update_max_mf_config(bp, bp->pending_max);
2705 bp->pending_max = 0;
2706 }
2707
ad5afc89
AE
2708 if (bp->port.pmf) {
2709 rc = bnx2x_initial_phy_init(bp, load_mode);
2710 if (rc)
2711 LOAD_ERROR_EXIT(bp, load_error3);
2712 }
c63da990 2713 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2714
619c5cb6
VZ
2715 /* Start fast path */
2716
2717 /* Initialize Rx filter. */
8b09be5f 2718 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2719
619c5cb6 2720 /* Start the Tx */
9f6c9258
DK
2721 switch (load_mode) {
2722 case LOAD_NORMAL:
16a5fd92 2723 /* Tx queue should be only re-enabled */
523224a3 2724 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2725 break;
2726
2727 case LOAD_OPEN:
2728 netif_tx_start_all_queues(bp->dev);
523224a3 2729 smp_mb__after_clear_bit();
9f6c9258
DK
2730 break;
2731
2732 case LOAD_DIAG:
8970b2e4 2733 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2734 bp->state = BNX2X_STATE_DIAG;
2735 break;
2736
2737 default:
2738 break;
2739 }
2740
00253a8c 2741 if (bp->port.pmf)
4c704899 2742 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2743 else
9f6c9258
DK
2744 bnx2x__link_status_update(bp);
2745
2746 /* start the timer */
2747 mod_timer(&bp->timer, jiffies + bp->current_interval);
2748
55c11941
MS
2749 if (CNIC_ENABLED(bp))
2750 bnx2x_load_cnic(bp);
9f6c9258 2751
ad5afc89
AE
2752 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2753 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2754 u32 val;
2755 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2756 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2757 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2758 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2759 }
2760
619c5cb6 2761 /* Wait for all pending SP commands to complete */
ad5afc89 2762 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2763 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2764 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2765 return -EBUSY;
2766 }
6891dd25 2767
9876879f
BW
2768 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2769 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2770 bnx2x_dcbx_init(bp, false);
2771
55c11941
MS
2772 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2773
9f6c9258
DK
2774 return 0;
2775
619c5cb6 2776#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2777load_error3:
ad5afc89
AE
2778 if (IS_PF(bp)) {
2779 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2780
ad5afc89
AE
2781 /* Clean queueable objects */
2782 bnx2x_squeeze_objects(bp);
2783 }
619c5cb6 2784
9f6c9258
DK
2785 /* Free SKBs, SGEs, TPA pool and driver internals */
2786 bnx2x_free_skbs(bp);
ec6ba945 2787 for_each_rx_queue(bp, i)
9f6c9258 2788 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2789
9f6c9258 2790 /* Release IRQs */
d6214d7a
DK
2791 bnx2x_free_irq(bp);
2792load_error2:
ad5afc89 2793 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2795 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2796 }
2797
2798 bp->port.pmf = 0;
9f6c9258
DK
2799load_error1:
2800 bnx2x_napi_disable(bp);
722c6f58 2801 bnx2x_del_all_napi(bp);
ad5afc89 2802
889b9af3 2803 /* clear pf_load status, as it was already set */
ad5afc89
AE
2804 if (IS_PF(bp))
2805 bnx2x_clear_pf_load(bp);
d6214d7a 2806load_error0:
ad5afc89
AE
2807 bnx2x_free_fp_mem(bp);
2808 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2809 bnx2x_free_mem(bp);
2810
2811 return rc;
619c5cb6 2812#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2813}
2814
7fa6f340 2815int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2816{
2817 u8 rc = 0, cos, i;
2818
2819 /* Wait until tx fastpath tasks complete */
2820 for_each_tx_queue(bp, i) {
2821 struct bnx2x_fastpath *fp = &bp->fp[i];
2822
2823 for_each_cos_in_tx_queue(fp, cos)
2824 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2825 if (rc)
2826 return rc;
2827 }
2828 return 0;
2829}
2830
9f6c9258 2831/* must be called with rtnl_lock */
5d07d868 2832int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2833{
2834 int i;
c9ee9206
VZ
2835 bool global = false;
2836
55c11941
MS
2837 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2838
9ce392d4 2839 /* mark driver is unloaded in shmem2 */
ad5afc89 2840 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2841 u32 val;
2842 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2843 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2844 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2845 }
2846
80bfe5cc 2847 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2848 (bp->state == BNX2X_STATE_CLOSED ||
2849 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2850 /* We can get here if the driver has been unloaded
2851 * during parity error recovery and is either waiting for a
2852 * leader to complete or for other functions to unload and
2853 * then ifdown has been issued. In this case we want to
2854 * unload and let other functions to complete a recovery
2855 * process.
2856 */
9f6c9258
DK
2857 bp->recovery_state = BNX2X_RECOVERY_DONE;
2858 bp->is_leader = 0;
c9ee9206
VZ
2859 bnx2x_release_leader_lock(bp);
2860 smp_mb();
2861
51c1a580
MS
2862 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2863 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2864 return -EINVAL;
2865 }
2866
80bfe5cc 2867 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2868 * has not completed successfully - all resources are released.
80bfe5cc
YM
2869 *
2870 * We can get here only after an unsuccessful ndo_* callback, during which
2871 * dev->IFF_UP flag is still on.
2872 */
2873 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2874 return 0;
2875
2876 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2877 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2878 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2879 */
2880 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2881 smp_mb();
2882
78c3bcc5
AE
2883 /* indicate to VFs that the PF is going down */
2884 bnx2x_iov_channel_down(bp);
2885
55c11941
MS
2886 if (CNIC_LOADED(bp))
2887 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2888
9505ee37
VZ
2889 /* Stop Tx */
2890 bnx2x_tx_disable(bp);
65565884 2891 netdev_reset_tc(bp->dev);
9505ee37 2892
9f6c9258 2893 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2894
9f6c9258 2895 del_timer_sync(&bp->timer);
f85582f8 2896
ad5afc89
AE
2897 if (IS_PF(bp)) {
2898 /* Set ALWAYS_ALIVE bit in shmem */
2899 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2900 bnx2x_drv_pulse(bp);
2901 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2902 bnx2x_save_statistics(bp);
2903 }
9f6c9258 2904
ad5afc89
AE
2905 /* wait till consumers catch up with producers in all queues */
2906 bnx2x_drain_tx_queues(bp);
9f6c9258 2907
9b176b6b
AE
2908 /* if VF, indicate to PF that this function is going down (PF will delete sp
2909 * elements and clear initializations)
2910 */
2911 if (IS_VF(bp))
2912 bnx2x_vfpf_close_vf(bp);
2913 else if (unload_mode != UNLOAD_RECOVERY)
2914 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2915 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2916 else {
c9ee9206
VZ
2917 /* Send the UNLOAD_REQUEST to the MCP */
2918 bnx2x_send_unload_req(bp, unload_mode);
2919
16a5fd92 2920 /* Prevent transactions to host from the functions on the
c9ee9206 2921 * engine that doesn't reset global blocks in case of global
16a5fd92 2922 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2923 * (the engine which leader will perform the recovery
2924 * last).
2925 */
2926 if (!CHIP_IS_E1x(bp))
2927 bnx2x_pf_disable(bp);
2928
2929 /* Disable HW interrupts, NAPI */
523224a3 2930 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2931 /* Delete all NAPI objects */
2932 bnx2x_del_all_napi(bp);
55c11941
MS
2933 if (CNIC_LOADED(bp))
2934 bnx2x_del_all_napi_cnic(bp);
523224a3 2935 /* Release IRQs */
d6214d7a 2936 bnx2x_free_irq(bp);
c9ee9206
VZ
2937
2938 /* Report UNLOAD_DONE to MCP */
5d07d868 2939 bnx2x_send_unload_done(bp, false);
523224a3 2940 }
9f6c9258 2941
619c5cb6 2942 /*
16a5fd92 2943 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2944 * the queueable objects here in case they failed to get cleaned so far.
2945 */
ad5afc89
AE
2946 if (IS_PF(bp))
2947 bnx2x_squeeze_objects(bp);
619c5cb6 2948
79616895
VZ
2949 /* There should be no more pending SP commands at this stage */
2950 bp->sp_state = 0;
2951
9f6c9258
DK
2952 bp->port.pmf = 0;
2953
2954 /* Free SKBs, SGEs, TPA pool and driver internals */
2955 bnx2x_free_skbs(bp);
55c11941
MS
2956 if (CNIC_LOADED(bp))
2957 bnx2x_free_skbs_cnic(bp);
ec6ba945 2958 for_each_rx_queue(bp, i)
9f6c9258 2959 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2960
ad5afc89
AE
2961 bnx2x_free_fp_mem(bp);
2962 if (CNIC_LOADED(bp))
55c11941 2963 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2964
ad5afc89 2965 if (IS_PF(bp)) {
ad5afc89
AE
2966 if (CNIC_LOADED(bp))
2967 bnx2x_free_mem_cnic(bp);
2968 }
b4cddbd6
AE
2969 bnx2x_free_mem(bp);
2970
9f6c9258 2971 bp->state = BNX2X_STATE_CLOSED;
55c11941 2972 bp->cnic_loaded = false;
9f6c9258 2973
c9ee9206
VZ
2974 /* Check if there are pending parity attentions. If there are - set
2975 * RECOVERY_IN_PROGRESS.
2976 */
ad5afc89 2977 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2978 bnx2x_set_reset_in_progress(bp);
2979
2980 /* Set RESET_IS_GLOBAL if needed */
2981 if (global)
2982 bnx2x_set_reset_global(bp);
2983 }
2984
9f6c9258
DK
2985 /* The last driver must disable a "close the gate" if there is no
2986 * parity attention or "process kill" pending.
2987 */
ad5afc89
AE
2988 if (IS_PF(bp) &&
2989 !bnx2x_clear_pf_load(bp) &&
2990 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2991 bnx2x_disable_close_the_gate(bp);
2992
55c11941
MS
2993 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2994
9f6c9258
DK
2995 return 0;
2996}
f85582f8 2997
9f6c9258
DK
2998int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2999{
3000 u16 pmcsr;
3001
adf5f6a1
DK
3002 /* If there is no power capability, silently succeed */
3003 if (!bp->pm_cap) {
51c1a580 3004 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3005 return 0;
3006 }
3007
9f6c9258
DK
3008 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3009
3010 switch (state) {
3011 case PCI_D0:
3012 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3013 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3014 PCI_PM_CTRL_PME_STATUS));
3015
3016 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3017 /* delay required during transition out of D3hot */
3018 msleep(20);
3019 break;
3020
3021 case PCI_D3hot:
3022 /* If there are other clients above don't
3023 shut down the power */
3024 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3025 return 0;
3026 /* Don't shut down the power for emulation and FPGA */
3027 if (CHIP_REV_IS_SLOW(bp))
3028 return 0;
3029
3030 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3031 pmcsr |= 3;
3032
3033 if (bp->wol)
3034 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3035
3036 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3037 pmcsr);
3038
3039 /* No more memory access after this point until
3040 * device is brought back to D0.
3041 */
3042 break;
3043
3044 default:
51c1a580 3045 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3046 return -EINVAL;
3047 }
3048 return 0;
3049}
3050
9f6c9258
DK
3051/*
3052 * net_device service functions
3053 */
d6214d7a 3054int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3055{
3056 int work_done = 0;
6383c0b3 3057 u8 cos;
9f6c9258
DK
3058 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3059 napi);
3060 struct bnx2x *bp = fp->bp;
3061
3062 while (1) {
3063#ifdef BNX2X_STOP_ON_ERROR
3064 if (unlikely(bp->panic)) {
3065 napi_complete(napi);
3066 return 0;
3067 }
3068#endif
8f20aa57
DK
3069 if (!bnx2x_fp_lock_napi(fp))
3070 return work_done;
9f6c9258 3071
6383c0b3 3072 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3073 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3074 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3075
9f6c9258
DK
3076 if (bnx2x_has_rx_work(fp)) {
3077 work_done += bnx2x_rx_int(fp, budget - work_done);
3078
3079 /* must not complete if we consumed full budget */
8f20aa57
DK
3080 if (work_done >= budget) {
3081 bnx2x_fp_unlock_napi(fp);
9f6c9258 3082 break;
8f20aa57 3083 }
9f6c9258
DK
3084 }
3085
3086 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3087 if (!bnx2x_fp_unlock_napi(fp) &&
3088 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3089
ec6ba945
VZ
3090 /* No need to update SB for FCoE L2 ring as long as
3091 * it's connected to the default SB and the SB
3092 * has been updated when NAPI was scheduled.
3093 */
3094 if (IS_FCOE_FP(fp)) {
3095 napi_complete(napi);
3096 break;
3097 }
9f6c9258 3098 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3099 /* bnx2x_has_rx_work() reads the status block,
3100 * thus we need to ensure that status block indices
3101 * have been actually read (bnx2x_update_fpsb_idx)
3102 * prior to this check (bnx2x_has_rx_work) so that
3103 * we won't write the "newer" value of the status block
3104 * to IGU (if there was a DMA right after
3105 * bnx2x_has_rx_work and if there is no rmb, the memory
3106 * reading (bnx2x_update_fpsb_idx) may be postponed
3107 * to right before bnx2x_ack_sb). In this case there
3108 * will never be another interrupt until there is
3109 * another update of the status block, while there
3110 * is still unhandled work.
3111 */
9f6c9258
DK
3112 rmb();
3113
3114 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3115 napi_complete(napi);
3116 /* Re-enable interrupts */
51c1a580 3117 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3118 "Update index to %d\n", fp->fp_hc_idx);
3119 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3120 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3121 IGU_INT_ENABLE, 1);
3122 break;
3123 }
3124 }
3125 }
3126
3127 return work_done;
3128}
3129
e0d1095a 3130#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3131/* must be called with local_bh_disable()d */
3132int bnx2x_low_latency_recv(struct napi_struct *napi)
3133{
3134 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3135 napi);
3136 struct bnx2x *bp = fp->bp;
3137 int found = 0;
3138
3139 if ((bp->state == BNX2X_STATE_CLOSED) ||
3140 (bp->state == BNX2X_STATE_ERROR) ||
3141 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3142 return LL_FLUSH_FAILED;
3143
3144 if (!bnx2x_fp_lock_poll(fp))
3145 return LL_FLUSH_BUSY;
3146
75b29459 3147 if (bnx2x_has_rx_work(fp))
8f20aa57 3148 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3149
3150 bnx2x_fp_unlock_poll(fp);
3151
3152 return found;
3153}
3154#endif
3155
9f6c9258
DK
3156/* we split the first BD into headers and data BDs
3157 * to ease the pain of our fellow microcode engineers
3158 * we use one mapping for both BDs
9f6c9258 3159 */
91226790
DK
3160static u16 bnx2x_tx_split(struct bnx2x *bp,
3161 struct bnx2x_fp_txdata *txdata,
3162 struct sw_tx_bd *tx_buf,
3163 struct eth_tx_start_bd **tx_bd, u16 hlen,
3164 u16 bd_prod)
9f6c9258
DK
3165{
3166 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3167 struct eth_tx_bd *d_tx_bd;
3168 dma_addr_t mapping;
3169 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3170
3171 /* first fix first BD */
9f6c9258
DK
3172 h_tx_bd->nbytes = cpu_to_le16(hlen);
3173
91226790
DK
3174 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3175 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3176
3177 /* now get a new data BD
3178 * (after the pbd) and fill it */
3179 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3180 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3181
3182 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3183 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3184
3185 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3186 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3187 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3188
3189 /* this marks the BD as one that has no individual mapping */
3190 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3191
3192 DP(NETIF_MSG_TX_QUEUED,
3193 "TSO split data size is %d (%x:%x)\n",
3194 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3195
3196 /* update tx_bd */
3197 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3198
3199 return bd_prod;
3200}
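/* Editorial sketch: the arithmetic behind the header/payload split above.
 * One DMA mapping covers the whole linear part; the first BD is shrunk to the
 * header length and a second BD is pointed hlen bytes into the same mapping
 * for the remaining payload. Plain integers stand in for the BD structures.
 */
#include <stdio.h>
#include <stdint.h>

struct sk_bd { uint64_t addr; uint16_t nbytes; };

static void sk_tx_split(struct sk_bd *first, struct sk_bd *second,
			uint16_t hlen)
{
	uint16_t old_len = first->nbytes;

	first->nbytes = hlen;			/* headers only */
	second->addr = first->addr + hlen;	/* same mapping, shifted */
	second->nbytes = old_len - hlen;	/* the rest of the data */
}

int main(void)
{
	struct sk_bd head = { .addr = 0x1000, .nbytes = 200 }, data;

	sk_tx_split(&head, &data, 66);		/* e.g. eth + ip + tcp headers */
	printf("head %u@%#llx data %u@%#llx\n",
	       head.nbytes, (unsigned long long)head.addr,
	       data.nbytes, (unsigned long long)data.addr);
	return 0;
}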
3201
86564c3f
YM
3202#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3203#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3204static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3205{
86564c3f
YM
3206 __sum16 tsum = (__force __sum16) csum;
3207
9f6c9258 3208 if (fix > 0)
86564c3f
YM
3209 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3210 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3211
3212 else if (fix < 0)
86564c3f
YM
3213 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3214 csum_partial(t_header, -fix, 0)));
9f6c9258 3215
e2593fcd 3216 return bswab16(tsum);
9f6c9258
DK
3217}
3218
91226790 3219static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3220{
3221 u32 rc;
a848ade4
DK
3222 __u8 prot = 0;
3223 __be16 protocol;
9f6c9258
DK
3224
3225 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3226 return XMIT_PLAIN;
9f6c9258 3227
a848ade4
DK
3228 protocol = vlan_get_protocol(skb);
3229 if (protocol == htons(ETH_P_IPV6)) {
3230 rc = XMIT_CSUM_V6;
3231 prot = ipv6_hdr(skb)->nexthdr;
3232 } else {
3233 rc = XMIT_CSUM_V4;
3234 prot = ip_hdr(skb)->protocol;
3235 }
9f6c9258 3236
a848ade4
DK
3237 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3238 if (inner_ip_hdr(skb)->version == 6) {
3239 rc |= XMIT_CSUM_ENC_V6;
3240 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3241 rc |= XMIT_CSUM_TCP;
9f6c9258 3242 } else {
a848ade4
DK
3243 rc |= XMIT_CSUM_ENC_V4;
3244 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3245 rc |= XMIT_CSUM_TCP;
3246 }
3247 }
a848ade4
DK
3248 if (prot == IPPROTO_TCP)
3249 rc |= XMIT_CSUM_TCP;
9f6c9258 3250
a848ade4 3251 if (skb_is_gso_v6(skb)) {
e768fb29 3252 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
a848ade4
DK
3253 if (rc & XMIT_CSUM_ENC)
3254 rc |= XMIT_GSO_ENC_V6;
3255 } else if (skb_is_gso(skb)) {
e768fb29 3256 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
a848ade4
DK
3257 if (rc & XMIT_CSUM_ENC)
3258 rc |= XMIT_GSO_ENC_V4;
3259 }
9f6c9258
DK
3260
3261 return rc;
3262}
3263
3264#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3265/* check if packet requires linearization (packet is too fragmented)
3266 no need to check fragmentation if page size > 8K (there will be no
3267 violation of FW restrictions) */
3268static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3269 u32 xmit_type)
3270{
3271 int to_copy = 0;
3272 int hlen = 0;
3273 int first_bd_sz = 0;
3274
3275 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3276 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3277
3278 if (xmit_type & XMIT_GSO) {
3279 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3280 /* Check if LSO packet needs to be copied:
3281 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3282 int wnd_size = MAX_FETCH_BD - 3;
3283 /* Number of windows to check */
3284 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3285 int wnd_idx = 0;
3286 int frag_idx = 0;
3287 u32 wnd_sum = 0;
3288
3289 /* Headers length */
3290 hlen = (int)(skb_transport_header(skb) - skb->data) +
3291 tcp_hdrlen(skb);
3292
3293 /* Amount of data (w/o headers) on linear part of SKB*/
3294 first_bd_sz = skb_headlen(skb) - hlen;
3295
3296 wnd_sum = first_bd_sz;
3297
3298 /* Calculate the first sum - it's special */
3299 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3300 wnd_sum +=
9e903e08 3301 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3302
3303 /* If there was data on linear skb data - check it */
3304 if (first_bd_sz > 0) {
3305 if (unlikely(wnd_sum < lso_mss)) {
3306 to_copy = 1;
3307 goto exit_lbl;
3308 }
3309
3310 wnd_sum -= first_bd_sz;
3311 }
3312
3313 /* Others are easier: run through the frag list and
3314 check all windows */
3315 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3316 wnd_sum +=
9e903e08 3317 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3318
3319 if (unlikely(wnd_sum < lso_mss)) {
3320 to_copy = 1;
3321 break;
3322 }
3323 wnd_sum -=
9e903e08 3324 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3325 }
3326 } else {
3327 /* a non-LSO packet that is too fragmented should always
3328 be linearized */
3329 to_copy = 1;
3330 }
3331 }
3332
3333exit_lbl:
3334 if (unlikely(to_copy))
3335 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3336 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3337 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3338 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3339
3340 return to_copy;
3341}
3342#endif
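/* Editorial sketch: a simplified model of the windowed check above. Treating
 * the linear part and each page fragment as one segment apiece, every run of
 * wnd_size consecutive segments must carry at least one MSS worth of data,
 * otherwise the firmware fetch constraint could be violated and the packet
 * has to be linearized first. This collapses some of the edge handling in the
 * real function and is only meant to show the idea.
 */
#include <stdio.h>
#include <stdbool.h>

static bool sk_needs_linearization(const unsigned int *seg_sizes,
				   unsigned int nr_segs,
				   unsigned int wnd_size,
				   unsigned int lso_mss)
{
	unsigned int start, i, wnd_sum;

	if (nr_segs <= wnd_size)
		return false;			/* few enough BDs, always OK */

	for (start = 0; start + wnd_size <= nr_segs; start++) {
		wnd_sum = 0;
		for (i = 0; i < wnd_size; i++)
			wnd_sum += seg_sizes[start + i];
		if (wnd_sum < lso_mss)
			return true;		/* too fragmented for this MSS */
	}
	return false;
}

int main(void)
{
	unsigned int segs[] = { 100, 80, 120, 90, 60, 70, 110, 95, 85, 105 };

	/* small segments against a 1460-byte MSS: prints 1 (linearize) */
	printf("%d\n", sk_needs_linearization(segs, 10, 5, 1460));
	return 0;
}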
3343
91226790
DK
3344static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3345 u32 xmit_type)
f2e0899f 3346{
a848ade4
DK
3347 struct ipv6hdr *ipv6;
3348
2297a2da
VZ
3349 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3350 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3351 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3352
3353 if (xmit_type & XMIT_GSO_ENC_V6)
3354 ipv6 = inner_ipv6_hdr(skb);
3355 else if (xmit_type & XMIT_GSO_V6)
3356 ipv6 = ipv6_hdr(skb);
3357 else
3358 ipv6 = NULL;
3359
3360 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3361 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3362}
3363
3364/**
e8920674 3365 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3366 *
e8920674
DK
3367 * @skb: packet skb
3368 * @pbd: parse BD
3369 * @xmit_type: xmit flags
f2e0899f 3370 */
91226790
DK
3371static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3372 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3373 struct eth_tx_start_bd *tx_start_bd,
91226790 3374 u32 xmit_type)
f2e0899f
DK
3375{
3376 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3377 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3378 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3379
3380 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3381 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3382 pbd->tcp_pseudo_csum =
86564c3f
YM
3383 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3384 ip_hdr(skb)->daddr,
3385 0, IPPROTO_TCP, 0));
f2e0899f 3386
057cf65e
YM
3387 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3388 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3389 } else {
f2e0899f 3390 pbd->tcp_pseudo_csum =
86564c3f
YM
3391 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3392 &ipv6_hdr(skb)->daddr,
3393 0, IPPROTO_TCP, 0));
057cf65e 3394 }
f2e0899f 3395
86564c3f
YM
3396 pbd->global_data |=
3397 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3398}
f85582f8 3399
a848ade4
DK
3400/**
3401 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3402 *
3403 * @bp: driver handle
3404 * @skb: packet skb
3405 * @parsing_data: data to be updated
3406 * @xmit_type: xmit flags
3407 *
3408 * 57712/578xx related, when skb has encapsulation
3409 */
3410static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3411 u32 *parsing_data, u32 xmit_type)
3412{
3413 *parsing_data |=
3414 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3415 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3416 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3417
3418 if (xmit_type & XMIT_CSUM_TCP) {
3419 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3420 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3421 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3422
3423 return skb_inner_transport_header(skb) +
3424 inner_tcp_hdrlen(skb) - skb->data;
3425 }
3426
3427 /* We support checksum offload for TCP and UDP only.
3428 * No need to pass the UDP header length - it's a constant.
3429 */
3430 return skb_inner_transport_header(skb) +
3431 sizeof(struct udphdr) - skb->data;
3432}
3433
f2e0899f 3434/**
e8920674 3435 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3436 *
e8920674
DK
3437 * @bp: driver handle
3438 * @skb: packet skb
3439 * @parsing_data: data to be updated
3440 * @xmit_type: xmit flags
f2e0899f 3441 *
91226790 3442 * 57712/578xx related
f2e0899f 3443 */
91226790
DK
3444static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3445 u32 *parsing_data, u32 xmit_type)
f2e0899f 3446{
e39aece7 3447 *parsing_data |=
2de67439 3448 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3449 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3450 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3451
e39aece7
VZ
3452 if (xmit_type & XMIT_CSUM_TCP) {
3453 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3454 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3455 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3456
e39aece7 3457 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3458 }
3459 /* We support checksum offload for TCP and UDP only.
3460 * No need to pass the UDP header length - it's a constant.
3461 */
3462 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3463}
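/* Example for a plain (untunneled) TCP/IPv4 frame with no options: the
 * function returns 14 (MAC) + 20 (IP) + 20 (TCP) = 54 bytes, while the bits
 * ORed into parsing_data carry the L4 start offset in 16-bit words and the
 * TCP header length in 32-bit dwords.
 */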
3464
a848ade4 3465/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3466static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3467 struct eth_tx_start_bd *tx_start_bd,
3468 u32 xmit_type)
93ef5c02 3469{
93ef5c02
DK
3470 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3471
a848ade4 3472 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3473 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3474
3475 if (!(xmit_type & XMIT_CSUM_TCP))
3476 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3477}
3478
f2e0899f 3479/**
e8920674 3480 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3481 *
e8920674
DK
3482 * @bp: driver handle
3483 * @skb: packet skb
3484 * @pbd: parse BD to be updated
3485 * @xmit_type: xmit flags
f2e0899f 3486 */
91226790
DK
3487static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3488 struct eth_tx_parse_bd_e1x *pbd,
3489 u32 xmit_type)
f2e0899f 3490{
e39aece7 3491 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3492
3493 /* for now NS flag is not used in Linux */
3494 pbd->global_data =
86564c3f
YM
3495 cpu_to_le16(hlen |
3496 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3497 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3498
3499 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3500 skb_network_header(skb)) >> 1;
f2e0899f 3501
e39aece7
VZ
3502 hlen += pbd->ip_hlen_w;
3503
3504 /* We support checksum offload for TCP and UDP only */
3505 if (xmit_type & XMIT_CSUM_TCP)
3506 hlen += tcp_hdrlen(skb) / 2;
3507 else
3508 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3509
3510 pbd->total_hlen_w = cpu_to_le16(hlen);
3511 hlen = hlen*2;
3512
3513 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3514 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3515
3516 } else {
3517 s8 fix = SKB_CS_OFF(skb); /* signed! */
3518
3519 DP(NETIF_MSG_TX_QUEUED,
3520 "hlen %d fix %d csum before fix %x\n",
3521 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3522
3523 /* HW bug: fixup the CSUM */
3524 pbd->tcp_pseudo_csum =
3525 bnx2x_csum_fix(skb_transport_header(skb),
3526 SKB_CS(skb), fix);
3527
3528 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3529 pbd->tcp_pseudo_csum);
3530 }
3531
3532 return hlen;
3533}
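/* Worked example for a plain TCP/IPv4 frame without options: hlen starts at
 * 7 words (14-byte MAC header), ip_hlen_w adds 10 words (20-byte IP header)
 * and the TCP header adds another 10 words, so total_hlen_w is 27 words and
 * the function returns 54 bytes.
 */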
f85582f8 3534
a848ade4
DK
3535static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3536 struct eth_tx_parse_bd_e2 *pbd_e2,
3537 struct eth_tx_parse_2nd_bd *pbd2,
3538 u16 *global_data,
3539 u32 xmit_type)
3540{
e287a75c 3541 u16 hlen_w = 0;
a848ade4 3542 u8 outerip_off, outerip_len = 0;
e768fb29 3543
e287a75c
DK
3544 /* from outer IP to transport */
3545 hlen_w = (skb_inner_transport_header(skb) -
3546 skb_network_header(skb)) >> 1;
a848ade4
DK
3547
3548 /* transport len */
e768fb29 3549 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3550
e287a75c 3551 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3552
e768fb29
DK
3553 /* outer IP header info */
3554 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3555 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3556 u32 csum = (__force u32)(~iph->check) -
3557 (__force u32)iph->tot_len -
3558 (__force u32)iph->frag_off;
c957d09f 3559
a848ade4 3560 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3561 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3562 } else {
3563 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3564 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3565 }
3566
3567 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3568
3569 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3570
3571 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3572 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3573
3574 pbd_e2->data.tunnel_data.pseudo_csum =
3575 bswab16(~csum_tcpudp_magic(
3576 inner_ip_hdr(skb)->saddr,
3577 inner_ip_hdr(skb)->daddr,
3578 0, IPPROTO_TCP, 0));
3579
3580 outerip_len = ip_hdr(skb)->ihl << 1;
3581 } else {
3582 pbd_e2->data.tunnel_data.pseudo_csum =
3583 bswab16(~csum_ipv6_magic(
3584 &inner_ipv6_hdr(skb)->saddr,
3585 &inner_ipv6_hdr(skb)->daddr,
3586 0, IPPROTO_TCP, 0));
3587 }
3588
3589 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3590
3591 *global_data |=
3592 outerip_off |
3593 (!!(xmit_type & XMIT_CSUM_V6) <<
3594 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3595 (outerip_len <<
3596 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3597 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3598 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3599
3600 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3601 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3602 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3603 }
a848ade4
DK
3604}
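/* Summary of the tunneled-GSO fields set above: fw_ip_hdr_to_payload_w is
 * the distance from the outer IP header to the inner TCP payload in 16-bit
 * words, the outer IPv4 checksum is pre-folded with the tot_len/frag_off
 * contribution removed, and global_data encodes the outer IP offset/length,
 * the outer IP version and, for UDP-based tunnels, the TUNNEL_UDP_EXIST
 * indication.
 */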
3605
9f6c9258
DK
3606/* called with netif_tx_lock
3607 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3608 * netif_wake_queue()
3609 */
3610netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3611{
3612 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3613
9f6c9258 3614 struct netdev_queue *txq;
6383c0b3 3615 struct bnx2x_fp_txdata *txdata;
9f6c9258 3616 struct sw_tx_bd *tx_buf;
619c5cb6 3617 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3618 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3619 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3620 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3621 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3622 u32 pbd_e2_parsing_data = 0;
9f6c9258 3623 u16 pkt_prod, bd_prod;
65565884 3624 int nbd, txq_index;
9f6c9258
DK
3625 dma_addr_t mapping;
3626 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3627 int i;
3628 u8 hlen = 0;
3629 __le16 pkt_size = 0;
3630 struct ethhdr *eth;
3631 u8 mac_type = UNICAST_ADDRESS;
3632
3633#ifdef BNX2X_STOP_ON_ERROR
3634 if (unlikely(bp->panic))
3635 return NETDEV_TX_BUSY;
3636#endif
3637
6383c0b3
AE
3638 txq_index = skb_get_queue_mapping(skb);
3639 txq = netdev_get_tx_queue(dev, txq_index);
3640
55c11941 3641 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3642
65565884 3643 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3644
3645 /* enable this debug print to view the transmission queue being used
51c1a580 3646 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3647 txq_index, fp_index, txdata_index); */
9f6c9258 3648
16a5fd92 3649 /* enable this debug print to view the transmission details
51c1a580
MS
3650 DP(NETIF_MSG_TX_QUEUED,
3651 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3652 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3653
6383c0b3 3654 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3655 skb_shinfo(skb)->nr_frags +
3656 BDS_PER_TX_PKT +
3657 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3658 /* Handle special storage cases separately */
c96bdc0c
DK
3659 if (txdata->tx_ring_size == 0) {
3660 struct bnx2x_eth_q_stats *q_stats =
3661 bnx2x_fp_qstats(bp, txdata->parent_fp);
3662 q_stats->driver_filtered_tx_pkt++;
3663 dev_kfree_skb(skb);
3664 return NETDEV_TX_OK;
3665 }
2de67439
YM
3666 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3667 netif_tx_stop_queue(txq);
c96bdc0c 3668 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3669
9f6c9258
DK
3670 return NETDEV_TX_BUSY;
3671 }
3672
51c1a580 3673 DP(NETIF_MSG_TX_QUEUED,
04c46736 3674 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3675 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3676 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3677 skb->len);
9f6c9258
DK
3678
3679 eth = (struct ethhdr *)skb->data;
3680
3681 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3682 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3683 if (is_broadcast_ether_addr(eth->h_dest))
3684 mac_type = BROADCAST_ADDRESS;
3685 else
3686 mac_type = MULTICAST_ADDRESS;
3687 }
3688
91226790 3689#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3690 /* First, check if we need to linearize the skb (due to FW
3691 restrictions). No need to check fragmentation if page size > 8K
 3692 (FW restrictions cannot be violated in that case) */
3693 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3694 /* Statistics of linearization */
3695 bp->lin_cnt++;
3696 if (skb_linearize(skb) != 0) {
51c1a580
MS
3697 DP(NETIF_MSG_TX_QUEUED,
3698 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3699 dev_kfree_skb_any(skb);
3700 return NETDEV_TX_OK;
3701 }
3702 }
3703#endif
619c5cb6
VZ
3704 /* Map skb linear data for DMA */
3705 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3706 skb_headlen(skb), DMA_TO_DEVICE);
3707 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3708 DP(NETIF_MSG_TX_QUEUED,
3709 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3710 dev_kfree_skb_any(skb);
3711 return NETDEV_TX_OK;
3712 }
9f6c9258
DK
3713 /*
3714 Please read carefully. First we use one BD which we mark as start,
3715 then we have a parsing info BD (used for TSO or xsum),
3716 and only then we have the rest of the TSO BDs.
3717 (don't forget to mark the last one as last,
3718 and to unmap only AFTER you write to the BD ...)
 3719 And above all, all PBD sizes are in words - NOT DWORDS!
3720 */
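	/* Illustrative BD chains (not exhaustive): a checksummed packet on a
	 * 57712/578xx uses start BD -> E2 parse BD -> data BD(s); a tunneled
	 * packet additionally consumes a 2nd parse BD, and an LSO packet whose
	 * linear part exceeds its headers is split so the start BD covers only
	 * the headers.
	 */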
3721
619c5cb6
VZ
3722 /* get current pkt produced now - advance it just before sending packet
3723 * since mapping of pages may fail and cause packet to be dropped
3724 */
6383c0b3
AE
3725 pkt_prod = txdata->tx_pkt_prod;
3726 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3727
619c5cb6
VZ
3728 /* get a tx_buf and first BD
3729 * tx_start_bd may be changed during SPLIT,
3730 * but first_bd will always stay first
3731 */
6383c0b3
AE
3732 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3733 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3734 first_bd = tx_start_bd;
9f6c9258
DK
3735
3736 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3737
91226790
DK
3738 /* header nbd: indirectly zero other flags! */
3739 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3740
3741 /* remember the first BD of the packet */
6383c0b3 3742 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3743 tx_buf->skb = skb;
3744 tx_buf->flags = 0;
3745
3746 DP(NETIF_MSG_TX_QUEUED,
3747 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3748 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3749
eab6d18d 3750 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3751 tx_start_bd->vlan_or_ethertype =
3752 cpu_to_le16(vlan_tx_tag_get(skb));
3753 tx_start_bd->bd_flags.as_bitfield |=
3754 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3755 } else {
3756 /* when transmitting in a vf, start bd must hold the ethertype
3757 * for fw to enforce it
3758 */
91226790 3759 if (IS_VF(bp))
dc1ba591
AE
3760 tx_start_bd->vlan_or_ethertype =
3761 cpu_to_le16(ntohs(eth->h_proto));
91226790 3762 else
dc1ba591
AE
3763 /* used by FW for packet accounting */
3764 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3765 }
9f6c9258 3766
91226790
DK
3767 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3768
9f6c9258
DK
3769 /* turn on parsing and get a BD */
3770 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3771
93ef5c02
DK
3772 if (xmit_type & XMIT_CSUM)
3773 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3774
619c5cb6 3775 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3776 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3777 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3778
3779 if (xmit_type & XMIT_CSUM_ENC) {
3780 u16 global_data = 0;
3781
3782 /* Set PBD in enc checksum offload case */
3783 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3784 &pbd_e2_parsing_data,
3785 xmit_type);
3786
3787 /* turn on 2nd parsing and get a BD */
3788 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3789
3790 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3791
3792 memset(pbd2, 0, sizeof(*pbd2));
3793
3794 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3795 (skb_inner_network_header(skb) -
3796 skb->data) >> 1;
3797
3798 if (xmit_type & XMIT_GSO_ENC)
3799 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3800 &global_data,
3801 xmit_type);
3802
3803 pbd2->global_data = cpu_to_le16(global_data);
3804
 3805 /* add additional parse BD indication to start BD */
3806 SET_FLAG(tx_start_bd->general_data,
3807 ETH_TX_START_BD_PARSE_NBDS, 1);
3808 /* set encapsulation flag in start BD */
3809 SET_FLAG(tx_start_bd->general_data,
3810 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3811 nbd++;
3812 } else if (xmit_type & XMIT_CSUM) {
91226790 3813 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3814 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3815 &pbd_e2_parsing_data,
3816 xmit_type);
a848ade4 3817 }
dc1ba591 3818
91226790
DK
 3819 /* Add the MACs to the parsing BD; this is a VF */
3820 if (IS_VF(bp)) {
3821 /* override GRE parameters in BD */
3822 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3823 &pbd_e2->data.mac_addr.src_mid,
3824 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3825 eth->h_source);
91226790
DK
3826
3827 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3828 &pbd_e2->data.mac_addr.dst_mid,
3829 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3830 eth->h_dest);
3831 }
96bed4b9
YM
3832
3833 SET_FLAG(pbd_e2_parsing_data,
3834 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3835 } else {
96bed4b9 3836 u16 global_data = 0;
6383c0b3 3837 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3838 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3839 /* Set PBD in checksum offload case */
3840 if (xmit_type & XMIT_CSUM)
3841 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3842
96bed4b9
YM
3843 SET_FLAG(global_data,
3844 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3845 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3846 }
3847
f85582f8 3848 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3849 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3850 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3851 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3852 pkt_size = tx_start_bd->nbytes;
3853
51c1a580 3854 DP(NETIF_MSG_TX_QUEUED,
91226790 3855 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3856 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3857 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3858 tx_start_bd->bd_flags.as_bitfield,
3859 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3860
3861 if (xmit_type & XMIT_GSO) {
3862
3863 DP(NETIF_MSG_TX_QUEUED,
3864 "TSO packet len %d hlen %d total len %d tso size %d\n",
3865 skb->len, hlen, skb_headlen(skb),
3866 skb_shinfo(skb)->gso_size);
3867
3868 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3869
91226790
DK
3870 if (unlikely(skb_headlen(skb) > hlen)) {
3871 nbd++;
6383c0b3
AE
3872 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3873 &tx_start_bd, hlen,
91226790
DK
3874 bd_prod);
3875 }
619c5cb6 3876 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3877 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3878 xmit_type);
f2e0899f 3879 else
44dbc78e 3880 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3881 }
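	/* At this point an LSO packet has its MSS and related flags staged
	 * either in pbd_e2_parsing_data (57712/578xx) or directly in the E1x
	 * parse BD; the split above (if any) guarantees the start BD describes
	 * only the headers while the payload rides in subsequent data BDs.
	 */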
2297a2da
VZ
3882
3883 /* Set the PBD's parsing_data field if not zero
3884 * (for the chips newer than 57711).
3885 */
3886 if (pbd_e2_parsing_data)
3887 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3888
9f6c9258
DK
3889 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3890
f85582f8 3891 /* Handle fragmented skb */
9f6c9258
DK
3892 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3893 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3894
9e903e08
ED
3895 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3896 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3897 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3898 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3899
51c1a580
MS
3900 DP(NETIF_MSG_TX_QUEUED,
3901 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3902
 3903 /* we need to unmap all buffers already mapped
 3904 * for this SKB;
 3905 * first_bd->nbd needs to be properly updated
 3906 * before calling bnx2x_free_tx_pkt
3907 */
3908 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3909 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3910 TX_BD(txdata->tx_pkt_prod),
3911 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3912 return NETDEV_TX_OK;
3913 }
3914
9f6c9258 3915 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3916 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3917 if (total_pkt_bd == NULL)
6383c0b3 3918 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3919
9f6c9258
DK
3920 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3921 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3922 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3923 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3924 nbd++;
9f6c9258
DK
3925
3926 DP(NETIF_MSG_TX_QUEUED,
3927 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3928 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3929 le16_to_cpu(tx_data_bd->nbytes));
3930 }
3931
3932 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3933
619c5cb6
VZ
3934 /* update with actual num BDs */
3935 first_bd->nbd = cpu_to_le16(nbd);
3936
9f6c9258
DK
3937 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3938
3939 /* now send a tx doorbell, counting the next BD
3940 * if the packet contains or ends with it
3941 */
3942 if (TX_BD_POFF(bd_prod) < nbd)
3943 nbd++;
3944
619c5cb6
VZ
3945 /* total_pkt_bytes should be set on the first data BD if
3946 * it's not an LSO packet and there is more than one
3947 * data BD. In this case pkt_size is limited by an MTU value.
3948 * However we prefer to set it for an LSO packet (while we don't
 3949 * have to) in order to save some CPU cycles in the non-LSO
 3950 * case, where we care much more about them.
3951 */
9f6c9258
DK
3952 if (total_pkt_bd != NULL)
3953 total_pkt_bd->total_pkt_bytes = pkt_size;
3954
523224a3 3955 if (pbd_e1x)
9f6c9258 3956 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3957 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3958 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3959 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3960 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3961 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3962 if (pbd_e2)
3963 DP(NETIF_MSG_TX_QUEUED,
3964 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3965 pbd_e2,
3966 pbd_e2->data.mac_addr.dst_hi,
3967 pbd_e2->data.mac_addr.dst_mid,
3968 pbd_e2->data.mac_addr.dst_lo,
3969 pbd_e2->data.mac_addr.src_hi,
3970 pbd_e2->data.mac_addr.src_mid,
3971 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3972 pbd_e2->parsing_data);
9f6c9258
DK
3973 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3974
2df1a70a
TH
3975 netdev_tx_sent_queue(txq, skb->len);
3976
8373c57d
WB
3977 skb_tx_timestamp(skb);
3978
6383c0b3 3979 txdata->tx_pkt_prod++;
9f6c9258
DK
3980 /*
3981 * Make sure that the BD data is updated before updating the producer
3982 * since FW might read the BD right after the producer is updated.
3983 * This is only applicable for weak-ordered memory model archs such
 3984 * as IA-64. The following barrier is also mandatory since the FW
 3985 * assumes packets must have BDs.
3986 */
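	/* The resulting order is therefore: BD writes -> wmb() -> doorbell
	 * data update -> compiler barrier -> DOORBELL MMIO -> mmiowb(), so
	 * the FW should never see a producer pointing past fully written BDs.
	 */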
3987 wmb();
3988
6383c0b3 3989 txdata->tx_db.data.prod += nbd;
9f6c9258 3990 barrier();
f85582f8 3991
6383c0b3 3992 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3993
3994 mmiowb();
3995
6383c0b3 3996 txdata->tx_bd_prod += nbd;
9f6c9258 3997
7df2dc6b 3998 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3999 netif_tx_stop_queue(txq);
4000
4001 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4002 * ordering of set_bit() in netif_tx_stop_queue() and read of
4003 * fp->bd_tx_cons */
4004 smp_mb();
4005
15192a8c 4006 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4007 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4008 netif_tx_wake_queue(txq);
4009 }
6383c0b3 4010 txdata->tx_pkt++;
9f6c9258
DK
4011
4012 return NETDEV_TX_OK;
4013}
f85582f8 4014
6383c0b3
AE
4015/**
4016 * bnx2x_setup_tc - routine to configure net_device for multi tc
4017 *
4018 * @netdev: net device to configure
4019 * @tc: number of traffic classes to enable
4020 *
4021 * callback connected to the ndo_setup_tc function pointer
4022 */
4023int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4024{
4025 int cos, prio, count, offset;
4026 struct bnx2x *bp = netdev_priv(dev);
4027
4028 /* setup tc must be called under rtnl lock */
4029 ASSERT_RTNL();
4030
16a5fd92 4031 /* no traffic classes requested. Aborting */
6383c0b3
AE
4032 if (!num_tc) {
4033 netdev_reset_tc(dev);
4034 return 0;
4035 }
4036
4037 /* requested to support too many traffic classes */
4038 if (num_tc > bp->max_cos) {
6bf07b8e 4039 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4040 num_tc, bp->max_cos);
6383c0b3
AE
4041 return -EINVAL;
4042 }
4043
4044 /* declare amount of supported traffic classes */
4045 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4046 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4047 return -EINVAL;
4048 }
4049
4050 /* configure priority to traffic class mapping */
4051 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4052 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4053 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4054 "mapping priority %d to tc %d\n",
6383c0b3
AE
4055 prio, bp->prio_to_cos[prio]);
4056 }
4057
16a5fd92 4058 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4059 This can be used for ets or pfc, and save the effort of setting
 4060 up a multi-class queueing discipline or negotiating DCBX with a switch
4061 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4062 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4063 for (prio = 1; prio < 16; prio++) {
4064 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4065 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4066 } */
4067
4068 /* configure traffic class to transmission queue mapping */
4069 for (cos = 0; cos < bp->max_cos; cos++) {
4070 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4071 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4072 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4073 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4074 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4075 cos, offset, count);
4076 }
4077
4078 return 0;
4079}
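/* Usage sketch with illustrative numbers: with num_tc = 2 and 8 ETH queues
 * (and assuming BNX2X_NUM_NON_CNIC_QUEUES(bp) is also 8), tc0 is mapped to
 * txq offsets 0-7 and tc1 to offsets 8-15, while each priority is routed to
 * the traffic class recorded in bp->prio_to_cos[].
 */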
4080
9f6c9258
DK
4081/* called with rtnl_lock */
4082int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4083{
4084 struct sockaddr *addr = p;
4085 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4086 int rc = 0;
9f6c9258 4087
51c1a580
MS
4088 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4089 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4090 return -EINVAL;
51c1a580 4091 }
614c76df 4092
a3348722
BW
4093 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4094 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4095 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4096 return -EINVAL;
51c1a580 4097 }
9f6c9258 4098
619c5cb6
VZ
4099 if (netif_running(dev)) {
4100 rc = bnx2x_set_eth_mac(bp, false);
4101 if (rc)
4102 return rc;
4103 }
4104
9f6c9258 4105 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4106
523224a3 4107 if (netif_running(dev))
619c5cb6 4108 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4109
619c5cb6 4110 return rc;
9f6c9258
DK
4111}
4112
b3b83c3f
DK
4113static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4114{
4115 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4116 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4117 u8 cos;
b3b83c3f
DK
4118
4119 /* Common */
55c11941 4120
b3b83c3f
DK
4121 if (IS_FCOE_IDX(fp_index)) {
4122 memset(sb, 0, sizeof(union host_hc_status_block));
4123 fp->status_blk_mapping = 0;
b3b83c3f 4124 } else {
b3b83c3f 4125 /* status blocks */
619c5cb6 4126 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4127 BNX2X_PCI_FREE(sb->e2_sb,
4128 bnx2x_fp(bp, fp_index,
4129 status_blk_mapping),
4130 sizeof(struct host_hc_status_block_e2));
4131 else
4132 BNX2X_PCI_FREE(sb->e1x_sb,
4133 bnx2x_fp(bp, fp_index,
4134 status_blk_mapping),
4135 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4136 }
55c11941 4137
b3b83c3f
DK
4138 /* Rx */
4139 if (!skip_rx_queue(bp, fp_index)) {
4140 bnx2x_free_rx_bds(fp);
4141
4142 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4143 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4144 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4145 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4146 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4147
4148 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4149 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4150 sizeof(struct eth_fast_path_rx_cqe) *
4151 NUM_RCQ_BD);
4152
4153 /* SGE ring */
4154 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4155 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4156 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4157 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4158 }
4159
4160 /* Tx */
4161 if (!skip_tx_queue(bp, fp_index)) {
4162 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4163 for_each_cos_in_tx_queue(fp, cos) {
65565884 4164 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4165
51c1a580 4166 DP(NETIF_MSG_IFDOWN,
94f05b0f 4167 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4168 fp_index, cos, txdata->cid);
4169
4170 BNX2X_FREE(txdata->tx_buf_ring);
4171 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4172 txdata->tx_desc_mapping,
4173 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4174 }
b3b83c3f
DK
4175 }
4176 /* end of fastpath */
4177}
4178
55c11941
MS
4179void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4180{
4181 int i;
4182 for_each_cnic_queue(bp, i)
4183 bnx2x_free_fp_mem_at(bp, i);
4184}
4185
b3b83c3f
DK
4186void bnx2x_free_fp_mem(struct bnx2x *bp)
4187{
4188 int i;
55c11941 4189 for_each_eth_queue(bp, i)
b3b83c3f
DK
4190 bnx2x_free_fp_mem_at(bp, i);
4191}
4192
1191cb83 4193static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4194{
4195 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4196 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4197 bnx2x_fp(bp, index, sb_index_values) =
4198 (__le16 *)status_blk.e2_sb->sb.index_values;
4199 bnx2x_fp(bp, index, sb_running_index) =
4200 (__le16 *)status_blk.e2_sb->sb.running_index;
4201 } else {
4202 bnx2x_fp(bp, index, sb_index_values) =
4203 (__le16 *)status_blk.e1x_sb->sb.index_values;
4204 bnx2x_fp(bp, index, sb_running_index) =
4205 (__le16 *)status_blk.e1x_sb->sb.running_index;
4206 }
4207}
4208
1191cb83
ED
4209/* Returns the number of actually allocated BDs */
4210static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4211 int rx_ring_size)
4212{
4213 struct bnx2x *bp = fp->bp;
4214 u16 ring_prod, cqe_ring_prod;
4215 int i, failure_cnt = 0;
4216
4217 fp->rx_comp_cons = 0;
4218 cqe_ring_prod = ring_prod = 0;
4219
 4220 /* This routine is called only during init, so
4221 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4222 */
4223 for (i = 0; i < rx_ring_size; i++) {
4224 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4225 failure_cnt++;
4226 continue;
4227 }
4228 ring_prod = NEXT_RX_IDX(ring_prod);
4229 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4230 WARN_ON(ring_prod <= (i - failure_cnt));
4231 }
4232
4233 if (failure_cnt)
4234 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4235 i - failure_cnt, fp->index);
4236
4237 fp->rx_bd_prod = ring_prod;
4238 /* Limit the CQE producer by the CQE ring size */
4239 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4240 cqe_ring_prod);
4241 fp->rx_pkt = fp->rx_calls = 0;
4242
15192a8c 4243 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4244
4245 return i - failure_cnt;
4246}
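/* Note: the value returned above is the number of RX BDs actually
 * populated; the caller compares it against the requested ring size and, if
 * it falls below the TPA/non-TPA minimum, releases the queue's memory
 * instead of running it undersized.
 */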
4247
4248static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4249{
4250 int i;
4251
4252 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4253 struct eth_rx_cqe_next_page *nextpg;
4254
4255 nextpg = (struct eth_rx_cqe_next_page *)
4256 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4257 nextpg->addr_hi =
4258 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4259 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4260 nextpg->addr_lo =
4261 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4262 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4263 }
4264}
4265
b3b83c3f
DK
4266static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4267{
4268 union host_hc_status_block *sb;
4269 struct bnx2x_fastpath *fp = &bp->fp[index];
4270 int ring_size = 0;
6383c0b3 4271 u8 cos;
c2188952 4272 int rx_ring_size = 0;
b3b83c3f 4273
a3348722
BW
4274 if (!bp->rx_ring_size &&
4275 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4276 rx_ring_size = MIN_RX_SIZE_NONTPA;
4277 bp->rx_ring_size = rx_ring_size;
55c11941 4278 } else if (!bp->rx_ring_size) {
c2188952
VZ
4279 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4280
065f8b92
YM
4281 if (CHIP_IS_E3(bp)) {
4282 u32 cfg = SHMEM_RD(bp,
4283 dev_info.port_hw_config[BP_PORT(bp)].
4284 default_cfg);
4285
4286 /* Decrease ring size for 1G functions */
4287 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4288 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4289 rx_ring_size /= 10;
4290 }
d760fc37 4291
c2188952
VZ
4292 /* allocate at least number of buffers required by FW */
4293 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4294 MIN_RX_SIZE_TPA, rx_ring_size);
4295
4296 bp->rx_ring_size = rx_ring_size;
614c76df 4297 } else /* if rx_ring_size specified - use it */
c2188952 4298 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4299
04c46736
YM
4300 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4301
b3b83c3f
DK
4302 /* Common */
4303 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4304
b3b83c3f 4305 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4306 /* status blocks */
619c5cb6 4307 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4308 BNX2X_PCI_ALLOC(sb->e2_sb,
4309 &bnx2x_fp(bp, index, status_blk_mapping),
4310 sizeof(struct host_hc_status_block_e2));
4311 else
4312 BNX2X_PCI_ALLOC(sb->e1x_sb,
4313 &bnx2x_fp(bp, index, status_blk_mapping),
4314 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4315 }
8eef2af1
DK
4316
4317 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4318 * set shortcuts for it.
4319 */
4320 if (!IS_FCOE_IDX(index))
4321 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4322
4323 /* Tx */
4324 if (!skip_tx_queue(bp, index)) {
4325 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4326 for_each_cos_in_tx_queue(fp, cos) {
65565884 4327 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4328
51c1a580
MS
4329 DP(NETIF_MSG_IFUP,
4330 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4331 index, cos);
4332
4333 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4334 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4335 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4336 &txdata->tx_desc_mapping,
b3b83c3f 4337 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4338 }
b3b83c3f
DK
4339 }
4340
4341 /* Rx */
4342 if (!skip_rx_queue(bp, index)) {
4343 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4344 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4345 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4346 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4347 &bnx2x_fp(bp, index, rx_desc_mapping),
4348 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4349
75b29459
DK
4350 /* Seed all CQEs by 1s */
4351 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4352 &bnx2x_fp(bp, index, rx_comp_mapping),
4353 sizeof(struct eth_fast_path_rx_cqe) *
4354 NUM_RCQ_BD);
b3b83c3f
DK
4355
4356 /* SGE ring */
4357 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4358 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4359 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4360 &bnx2x_fp(bp, index, rx_sge_mapping),
4361 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4362 /* RX BD ring */
4363 bnx2x_set_next_page_rx_bd(fp);
4364
4365 /* CQ ring */
4366 bnx2x_set_next_page_rx_cq(fp);
4367
4368 /* BDs */
4369 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4370 if (ring_size < rx_ring_size)
4371 goto alloc_mem_err;
4372 }
4373
4374 return 0;
4375
4376/* handles low memory cases */
4377alloc_mem_err:
4378 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4379 index, ring_size);
 4380 /* FW will drop all packets if the queue is not big enough;
 4381 * in these cases we disable the queue.
6383c0b3 4382 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4383 */
4384 if (ring_size < (fp->disable_tpa ?
eb722d7a 4385 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4386 /* release memory allocated for this queue */
4387 bnx2x_free_fp_mem_at(bp, index);
4388 return -ENOMEM;
4389 }
4390 return 0;
4391}
4392
55c11941
MS
4393int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4394{
4395 if (!NO_FCOE(bp))
4396 /* FCoE */
4397 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4398 /* we will fail load process instead of mark
4399 * NO_FCOE_FLAG
4400 */
4401 return -ENOMEM;
4402
4403 return 0;
4404}
4405
b3b83c3f
DK
4406int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4407{
4408 int i;
4409
55c11941
MS
4410 /* 1. Allocate FP for leading - fatal if error
4411 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4412 */
4413
4414 /* leading */
4415 if (bnx2x_alloc_fp_mem_at(bp, 0))
4416 return -ENOMEM;
6383c0b3 4417
b3b83c3f
DK
4418 /* RSS */
4419 for_each_nondefault_eth_queue(bp, i)
4420 if (bnx2x_alloc_fp_mem_at(bp, i))
4421 break;
4422
4423 /* handle memory failures */
4424 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4425 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4426
4427 WARN_ON(delta < 0);
4864a16a 4428 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4429 if (CNIC_SUPPORT(bp))
 4430 /* move non-eth FPs next to the last eth FP;
 4431 * must be done in that order:
4432 * FCOE_IDX < FWD_IDX < OOO_IDX
4433 */
b3b83c3f 4434
55c11941
MS
 4435 /* move FCoE fp even if NO_FCOE_FLAG is on */
4436 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4437 bp->num_ethernet_queues -= delta;
4438 bp->num_queues = bp->num_ethernet_queues +
4439 bp->num_cnic_queues;
b3b83c3f
DK
4440 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4441 bp->num_queues + delta, bp->num_queues);
4442 }
4443
4444 return 0;
4445}
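/* Note on the failure path above: if only i of the requested ETH queues
 * could be allocated, the remaining delta is trimmed via
 * bnx2x_shrink_eth_fp() and the FCoE fastpath slides down by delta so the
 * CNIC queues stay contiguous right after the ETH ones.
 */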
d6214d7a 4446
523224a3
DK
4447void bnx2x_free_mem_bp(struct bnx2x *bp)
4448{
c3146eb6
DK
4449 int i;
4450
4451 for (i = 0; i < bp->fp_array_size; i++)
4452 kfree(bp->fp[i].tpa_info);
523224a3 4453 kfree(bp->fp);
15192a8c
BW
4454 kfree(bp->sp_objs);
4455 kfree(bp->fp_stats);
65565884 4456 kfree(bp->bnx2x_txq);
523224a3
DK
4457 kfree(bp->msix_table);
4458 kfree(bp->ilt);
4459}
4460
0329aba1 4461int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4462{
4463 struct bnx2x_fastpath *fp;
4464 struct msix_entry *tbl;
4465 struct bnx2x_ilt *ilt;
6383c0b3 4466 int msix_table_size = 0;
55c11941 4467 int fp_array_size, txq_array_size;
15192a8c 4468 int i;
6383c0b3
AE
4469
4470 /*
 4471 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4472 * path IGU SBs plus default SB (for PF only).
6383c0b3 4473 */
1ab4434c
AE
4474 msix_table_size = bp->igu_sb_cnt;
4475 if (IS_PF(bp))
4476 msix_table_size++;
4477 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4478
6383c0b3 4479 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4480 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4481 bp->fp_array_size = fp_array_size;
4482 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4483
c3146eb6 4484 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4485 if (!fp)
4486 goto alloc_err;
c3146eb6 4487 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4488 fp[i].tpa_info =
4489 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4490 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4491 if (!(fp[i].tpa_info))
4492 goto alloc_err;
4493 }
4494
523224a3
DK
4495 bp->fp = fp;
4496
15192a8c 4497 /* allocate sp objs */
c3146eb6 4498 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4499 GFP_KERNEL);
4500 if (!bp->sp_objs)
4501 goto alloc_err;
4502
4503 /* allocate fp_stats */
c3146eb6 4504 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4505 GFP_KERNEL);
4506 if (!bp->fp_stats)
4507 goto alloc_err;
4508
65565884 4509 /* Allocate memory for the transmission queues array */
55c11941
MS
4510 txq_array_size =
4511 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4512 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4513
4514 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4515 GFP_KERNEL);
65565884
MS
4516 if (!bp->bnx2x_txq)
4517 goto alloc_err;
4518
523224a3 4519 /* msix table */
01e23742 4520 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4521 if (!tbl)
4522 goto alloc_err;
4523 bp->msix_table = tbl;
4524
4525 /* ilt */
4526 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4527 if (!ilt)
4528 goto alloc_err;
4529 bp->ilt = ilt;
4530
4531 return 0;
4532alloc_err:
4533 bnx2x_free_mem_bp(bp);
4534 return -ENOMEM;
523224a3
DK
4535}
4536
a9fccec7 4537int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4538{
4539 struct bnx2x *bp = netdev_priv(dev);
4540
4541 if (unlikely(!netif_running(dev)))
4542 return 0;
4543
5d07d868 4544 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4545 return bnx2x_nic_load(bp, LOAD_NORMAL);
4546}
4547
1ac9e428
YR
4548int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4549{
4550 u32 sel_phy_idx = 0;
4551 if (bp->link_params.num_phys <= 1)
4552 return INT_PHY;
4553
4554 if (bp->link_vars.link_up) {
4555 sel_phy_idx = EXT_PHY1;
4556 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4557 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4558 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4559 sel_phy_idx = EXT_PHY2;
4560 } else {
4561
4562 switch (bnx2x_phy_selection(&bp->link_params)) {
4563 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4564 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4565 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4566 sel_phy_idx = EXT_PHY1;
4567 break;
4568 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4569 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4570 sel_phy_idx = EXT_PHY2;
4571 break;
4572 }
4573 }
4574
4575 return sel_phy_idx;
1ac9e428
YR
4576}
4577int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4578{
4579 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4580 /*
2de67439 4581 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4582 * swapping is enabled). So when swapping is enabled, we need to reverse
4583 * the configuration
4584 */
4585
4586 if (bp->link_params.multi_phy_config &
4587 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4588 if (sel_phy_idx == EXT_PHY1)
4589 sel_phy_idx = EXT_PHY2;
4590 else if (sel_phy_idx == EXT_PHY2)
4591 sel_phy_idx = EXT_PHY1;
4592 }
4593 return LINK_CONFIG_IDX(sel_phy_idx);
4594}
4595
55c11941 4596#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4597int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4598{
4599 struct bnx2x *bp = netdev_priv(dev);
4600 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4601
4602 switch (type) {
4603 case NETDEV_FCOE_WWNN:
4604 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4605 cp->fcoe_wwn_node_name_lo);
4606 break;
4607 case NETDEV_FCOE_WWPN:
4608 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4609 cp->fcoe_wwn_port_name_lo);
4610 break;
4611 default:
51c1a580 4612 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4613 return -EINVAL;
4614 }
4615
4616 return 0;
4617}
4618#endif
4619
9f6c9258
DK
4620/* called with rtnl_lock */
4621int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4622{
4623 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4624
4625 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4626 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4627 return -EAGAIN;
4628 }
4629
4630 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4631 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4632 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4633 return -EINVAL;
51c1a580 4634 }
9f6c9258
DK
4635
4636 /* This does not race with packet allocation
4637 * because the actual alloc size is
4638 * only updated as part of load
4639 */
4640 dev->mtu = new_mtu;
4641
66371c44
MM
4642 return bnx2x_reload_if_running(dev);
4643}
4644
c8f44aff 4645netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4646 netdev_features_t features)
66371c44
MM
4647{
4648 struct bnx2x *bp = netdev_priv(dev);
4649
4650 /* TPA requires Rx CSUM offloading */
621b4d66 4651 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4652 features &= ~NETIF_F_LRO;
621b4d66
DK
4653 features &= ~NETIF_F_GRO;
4654 }
66371c44
MM
4655
4656 return features;
4657}
4658
c8f44aff 4659int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4660{
4661 struct bnx2x *bp = netdev_priv(dev);
4662 u32 flags = bp->flags;
8802f579 4663 u32 changes;
538dd2e3 4664 bool bnx2x_reload = false;
66371c44
MM
4665
4666 if (features & NETIF_F_LRO)
4667 flags |= TPA_ENABLE_FLAG;
4668 else
4669 flags &= ~TPA_ENABLE_FLAG;
4670
621b4d66
DK
4671 if (features & NETIF_F_GRO)
4672 flags |= GRO_ENABLE_FLAG;
4673 else
4674 flags &= ~GRO_ENABLE_FLAG;
4675
538dd2e3
MB
4676 if (features & NETIF_F_LOOPBACK) {
4677 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4678 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4679 bnx2x_reload = true;
4680 }
4681 } else {
4682 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4683 bp->link_params.loopback_mode = LOOPBACK_NONE;
4684 bnx2x_reload = true;
4685 }
4686 }
4687
8802f579
ED
4688 changes = flags ^ bp->flags;
4689
16a5fd92 4690 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4691 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4692 changes &= ~GRO_ENABLE_FLAG;
4693
4694 if (changes)
538dd2e3 4695 bnx2x_reload = true;
8802f579
ED
4696
4697 bp->flags = flags;
66371c44 4698
538dd2e3 4699 if (bnx2x_reload) {
66371c44
MM
4700 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4701 return bnx2x_reload_if_running(dev);
4702 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4703 }
4704
66371c44 4705 return 0;
9f6c9258
DK
4706}
4707
4708void bnx2x_tx_timeout(struct net_device *dev)
4709{
4710 struct bnx2x *bp = netdev_priv(dev);
4711
4712#ifdef BNX2X_STOP_ON_ERROR
4713 if (!bp->panic)
4714 bnx2x_panic();
4715#endif
7be08a72
AE
4716
4717 smp_mb__before_clear_bit();
4718 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4719 smp_mb__after_clear_bit();
4720
9f6c9258 4721 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4722 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4723}
4724
9f6c9258
DK
4725int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4726{
4727 struct net_device *dev = pci_get_drvdata(pdev);
4728 struct bnx2x *bp;
4729
4730 if (!dev) {
4731 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4732 return -ENODEV;
4733 }
4734 bp = netdev_priv(dev);
4735
4736 rtnl_lock();
4737
4738 pci_save_state(pdev);
4739
4740 if (!netif_running(dev)) {
4741 rtnl_unlock();
4742 return 0;
4743 }
4744
4745 netif_device_detach(dev);
4746
5d07d868 4747 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4748
4749 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4750
4751 rtnl_unlock();
4752
4753 return 0;
4754}
4755
4756int bnx2x_resume(struct pci_dev *pdev)
4757{
4758 struct net_device *dev = pci_get_drvdata(pdev);
4759 struct bnx2x *bp;
4760 int rc;
4761
4762 if (!dev) {
4763 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4764 return -ENODEV;
4765 }
4766 bp = netdev_priv(dev);
4767
4768 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4769 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4770 return -EAGAIN;
4771 }
4772
4773 rtnl_lock();
4774
4775 pci_restore_state(pdev);
4776
4777 if (!netif_running(dev)) {
4778 rtnl_unlock();
4779 return 0;
4780 }
4781
4782 bnx2x_set_power_state(bp, PCI_D0);
4783 netif_device_attach(dev);
4784
4785 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4786
4787 rtnl_unlock();
4788
4789 return rc;
4790}
619c5cb6 4791
619c5cb6
VZ
4792void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4793 u32 cid)
4794{
b9871bcf
AE
4795 if (!cxt) {
4796 BNX2X_ERR("bad context pointer %p\n", cxt);
4797 return;
4798 }
4799
619c5cb6
VZ
4800 /* ustorm cxt validation */
4801 cxt->ustorm_ag_context.cdu_usage =
4802 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4803 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4804 /* xcontext validation */
4805 cxt->xstorm_ag_context.cdu_reserved =
4806 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4807 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4808}
4809
1191cb83
ED
4810static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4811 u8 fw_sb_id, u8 sb_index,
4812 u8 ticks)
619c5cb6 4813{
619c5cb6
VZ
4814 u32 addr = BAR_CSTRORM_INTMEM +
4815 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4816 REG_WR8(bp, addr, ticks);
51c1a580
MS
4817 DP(NETIF_MSG_IFUP,
4818 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4819 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4820}
4821
1191cb83
ED
4822static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4823 u16 fw_sb_id, u8 sb_index,
4824 u8 disable)
619c5cb6
VZ
4825{
4826 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4827 u32 addr = BAR_CSTRORM_INTMEM +
4828 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4829 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4830 /* clear and set */
4831 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4832 flags |= enable_flag;
0c14e5ce 4833 REG_WR8(bp, addr, flags);
51c1a580
MS
4834 DP(NETIF_MSG_IFUP,
4835 "port %x fw_sb_id %d sb_index %d disable %d\n",
4836 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4837}
4838
4839void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4840 u8 sb_index, u8 disable, u16 usec)
4841{
4842 int port = BP_PORT(bp);
4843 u8 ticks = usec / BNX2X_BTR;
4844
4845 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4846
4847 disable = disable ? 1 : (usec ? 0 : 1);
4848 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4849}