net: vlan: prepare for 802.1ad support
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (linux-2.6-block.git)
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/ip6_checksum.h>
27#include <linux/prefetch.h>
28#include "bnx2x_cmn.h"
29#include "bnx2x_init.h"
30#include "bnx2x_sp.h"
31
32/**
33 * bnx2x_move_fp - move content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @from: source FP index
37 * @to: destination FP index
38 *
39 * Makes sure the contents of the bp->fp[to].napi are kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem copying the entire
42 * source onto the target. Update txdata pointers and related
43 * content.
44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
55
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
58
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
62
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86}
87
88/**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97{
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113 }
114}
115
116/**
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118 *
119 * @bp: driver handle
120 * @delta: number of eth queues which were not allocated
121 */
122static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123{
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overridden
128 */
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
133
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137 }
138 }
139}
140
141int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143/* free skb in the packet ring at pos idx
144 * return idx of last bd freed
145 */
6383c0b3 146static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
9f6c9258 149{
6383c0b3 150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155 int nbd;
156
157 /* prefetch skb end pointer to speedup dev_kfree_skb() */
158 prefetch(&skb->end);
159
51c1a580 160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 161 txdata->txq_index, idx, tx_buf, skb);
162
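 /* Illustrative note, not part of the original driver source: the code
  * below walks what is normally the following chain of BDs for one packet:
  *   start BD (DMA-mapped)  ->  parse BD (no mapping)  ->
  *   optional TSO split header BD (no mapping, BNX2X_TSO_SPLIT_BD)  ->
  *   one data BD per fragment (each DMA-mapped).
  * nbd (taken from the start BD, minus one) is treated as the number of BDs
  * that follow the start BD, hence the decrements as the parse/split BDs are
  * skipped before the fragment BDs are unmapped.
  */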
163 /* unmap first bd */
6383c0b3 164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 167
619c5cb6 168
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
173 bnx2x_panic();
174 }
175#endif
176 new_cons = nbd + tx_buf->first_bd;
177
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181 /* Skip a parse bd... */
182 --nbd;
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 --nbd;
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 }
190
191 /* now free frags */
192 while (nbd > 0) {
193
6383c0b3 194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 if (--nbd)
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199 }
200
201 /* release skb */
202 WARN_ON(!skb);
d8290ae5 203 if (likely(skb)) {
204 (*pkts_compl)++;
205 (*bytes_compl) += skb->len;
206 }
d8290ae5 207
40955532 208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
210 tx_buf->skb = NULL;
211
212 return new_cons;
213}
214
6383c0b3 215int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 216{
9f6c9258 217 struct netdev_queue *txq;
6383c0b3 218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 219 unsigned int pkts_compl = 0, bytes_compl = 0;
220
221#ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
223 return -1;
224#endif
225
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
229
230 while (sw_cons != hw_cons) {
231 u16 pkt_cons;
232
233 pkt_cons = TX_BD(sw_cons);
234
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 238
2df1a70a 239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 240 &pkts_compl, &bytes_compl);
2df1a70a 241
242 sw_cons++;
243 }
244
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
249
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
254 * forever.
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
258 */
259 smp_mb();
260
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 *
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
269 * stops the queue
270 */
271
272 __netif_tx_lock(txq, smp_processor_id());
273
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
278
279 __netif_tx_unlock(txq);
280 }
281 return 0;
282}
283
284static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285 u16 idx)
286{
287 u16 last_max = fp->last_max_sge;
288
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
291}
292
621b4d66
DK
293static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 u16 sge_len,
295 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
296{
297 struct bnx2x *bp = fp->bp;
9f6c9258
DK
298 u16 last_max, last_elem, first_elem;
299 u16 delta = 0;
300 u16 i;
301
302 if (!sge_len)
303 return;
304
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
619c5cb6 307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
309
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
312
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
523224a3 315 bnx2x_update_last_max_sge(fp,
621b4d66 316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
317
318 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
321
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
324 last_elem++;
325
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
329 break;
330
619c5cb6
VZ
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
333 }
334
335 if (delta > 0) {
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
339 }
340
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
344}
345
2de67439 346/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
347 * CQE (calculated by HW).
348 */
349static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
350 const struct eth_fast_path_rx_cqe *cqe,
351 bool *l4_rxhash)
e52fcb24 352{
2de67439 353 /* Get Toeplitz hash from CQE */
e52fcb24 354 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
357
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 361 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
362 }
363 *l4_rxhash = false;
e52fcb24
ED
364 return 0;
365}
366
9f6c9258 367static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 368 u16 cons, u16 prod,
619c5cb6 369 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
370{
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 dma_addr_t mapping;
619c5cb6
VZ
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 378
619c5cb6
VZ
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
e52fcb24 383 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 384 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 385 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
386 fp->rx_buf_size, DMA_FROM_DEVICE);
387 /*
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
391 */
392
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
e52fcb24 395 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397 return;
398 }
9f6c9258 399
e52fcb24
ED
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
619c5cb6 402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 403 /* point prod_bd to new data */
9f6c9258
DK
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
619c5cb6
VZ
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
409
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
421 tpa_info->gro_size = gro_size;
422 }
619c5cb6 423
9f6c9258
DK
424#ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426#ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428#else
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430#endif
431 fp->tpa_queue_used);
432#endif
433}
434
435/* Timestamp option length allowed for TPA aggregation:
436 *
437 * nop nop kind length echo val
438 */
439#define TPA_TSTAMP_OPT_LEN 12
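/* Illustrative breakdown, not part of the original source: the 12 bytes
 * above correspond to the TCP timestamp option as it is normally padded,
 *   NOP(1) + NOP(1) + kind(1) + length(1) + TSval(4) + TSecr(4) = 12,
 * which is why a fixed option length can be assumed for TPA aggregations.
 */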
440/**
441 * bnx2x_set_gro_params - compute GRO values
442 *
443 * @skb: packet skb
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
446 * aggregation.
447 * @pkt_len: length of all segments
448 *
449 * Approximate value of the MSS for this aggregation calculated using
450 * the first packet of it.
451 * Compute number of aggregated segments, and gso_type.
452 */
453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
e4e3c02a 456{
cbf1de72 457 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 458 * other than timestamp or IPv6 extension headers.
e4e3c02a 459 */
619c5cb6
VZ
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 463 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 464 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 } else {
619c5cb6 467 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469 }
e4e3c02a
VZ
470
471 /* Check if there was a TCP timestamp; if there is, it will
472 * always be 12 bytes long: nop nop kind length echo val.
473 *
474 * Otherwise FW would close the aggregation.
475 */
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
cbf1de72
YM
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
483 */
ab5777d7 484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
485}
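/* Illustrative example with assumed values, not from the original source:
 * for an IPv4 aggregation whose first packet carries TCP timestamps,
 *   hdrs_len = ETH_HLEN(14) + iphdr(20) + tcphdr(20) + TPA_TSTAMP_OPT_LEN(12) = 66,
 * so len_on_bd = 1514 gives gso_size = 1514 - 66 = 1448.
 */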
486
487static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
489{
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493 dma_addr_t mapping;
494
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
497 return -ENOMEM;
498 }
499
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 501 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
505 return -ENOMEM;
506 }
507
508 sw_buf->page = page;
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514 return 0;
515}
516
9f6c9258 517static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
518 struct bnx2x_agg_info *tpa_info,
519 u16 pages,
520 struct sk_buff *skb,
619c5cb6
VZ
521 struct eth_end_agg_rx_cqe *cqe,
522 u16 cqe_idx)
9f6c9258
DK
523{
524 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
619c5cb6 527 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 528 u16 full_page = 0, gro_size = 0;
9f6c9258 529
619c5cb6 530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
531
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
535 }
9f6c9258
DK
536
537 /* This is needed in order to enable forwarding support */
cbf1de72
YM
538 if (frag_size)
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 542
9f6c9258 543#ifdef BNX2X_STOP_ON_ERROR
924d75ab 544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 pages, cqe_idx);
619c5cb6 547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
548 bnx2x_panic();
549 return -EINVAL;
550 }
551#endif
552
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
556
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
561 else /* LRO */
924d75ab 562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 563
9f6c9258
DK
564 rx_pg = &fp->rx_page_ring[sge_idx];
565 old_rx_pg = *rx_pg;
566
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570 if (unlikely(err)) {
15192a8c 571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
572 return err;
573 }
574
575 /* Unmap the page as we are going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 578 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 579 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582 else { /* GRO */
583 int rem;
584 int offset = 0;
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
589 if (offset)
590 get_page(old_rx_pg.page);
591 offset += len;
592 }
593 }
9f6c9258
DK
594
595 skb->data_len += frag_len;
924d75ab 596 skb->truesize += SGE_PAGES;
9f6c9258
DK
597 skb->len += frag_len;
598
599 frag_size -= frag_len;
600 }
601
602 return 0;
603}
604
d46d132c
ED
605static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606{
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
609 else
610 kfree(data);
611}
612
613static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614{
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
617
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619}
620
9969085e
YM
621#ifdef CONFIG_INET
622static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623{
624 const struct iphdr *iph = ip_hdr(skb);
625 struct tcphdr *th;
626
627 skb_set_transport_header(skb, sizeof(struct iphdr));
628 th = tcp_hdr(skb);
629
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
632}
633
634static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635{
636 struct ipv6hdr *iph = ipv6_hdr(skb);
637 struct tcphdr *th;
638
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640 th = tcp_hdr(skb);
641
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
644}
645#endif
646
647static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
648 struct sk_buff *skb)
649{
650#ifdef CONFIG_INET
cbf1de72 651 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
652 skb_set_network_header(skb, 0);
653 switch (be16_to_cpu(skb->protocol)) {
654 case ETH_P_IP:
655 bnx2x_gro_ip_csum(bp, skb);
656 break;
657 case ETH_P_IPV6:
658 bnx2x_gro_ipv6_csum(bp, skb);
659 break;
660 default:
661 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
662 be16_to_cpu(skb->protocol));
663 }
664 tcp_gro_complete(skb);
665 }
666#endif
667 napi_gro_receive(&fp->napi, skb);
668}
669
1191cb83
ED
670static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
671 struct bnx2x_agg_info *tpa_info,
672 u16 pages,
673 struct eth_end_agg_rx_cqe *cqe,
674 u16 cqe_idx)
9f6c9258 675{
619c5cb6 676 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 677 u8 pad = tpa_info->placement_offset;
619c5cb6 678 u16 len = tpa_info->len_on_bd;
e52fcb24 679 struct sk_buff *skb = NULL;
621b4d66 680 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
681 u8 old_tpa_state = tpa_info->tpa_state;
682
683 tpa_info->tpa_state = BNX2X_TPA_STOP;
684
685 /* If there was an error during the handling of the TPA_START -
686 * drop this aggregation.
687 */
688 if (old_tpa_state == BNX2X_TPA_ERROR)
689 goto drop;
690
e52fcb24 691 /* Try to allocate the new data */
d46d132c 692 new_data = bnx2x_frag_alloc(fp);
9f6c9258
DK
693 /* Unmap skb in the pool anyway, as we are going to change
694 pool entry status to BNX2X_TPA_STOP even if new skb allocation
695 fails. */
696 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 697 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 698 if (likely(new_data))
d46d132c 699 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 700
e52fcb24 701 if (likely(skb)) {
9f6c9258 702#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 703 if (pad + len > fp->rx_buf_size) {
51c1a580 704 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 705 pad, len, fp->rx_buf_size);
9f6c9258
DK
706 bnx2x_panic();
707 return;
708 }
709#endif
710
e52fcb24 711 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 712 skb_put(skb, len);
e52fcb24 713 skb->rxhash = tpa_info->rxhash;
a334b5fb 714 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
715
716 skb->protocol = eth_type_trans(skb, bp->dev);
717 skb->ip_summed = CHECKSUM_UNNECESSARY;
718
621b4d66
DK
719 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
720 skb, cqe, cqe_idx)) {
619c5cb6
VZ
721 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
722 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9969085e 723 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 724 } else {
51c1a580
MS
725 DP(NETIF_MSG_RX_STATUS,
726 "Failed to allocate new pages - dropping packet!\n");
40955532 727 dev_kfree_skb_any(skb);
9f6c9258
DK
728 }
729
730
e52fcb24
ED
731 /* put new data in bin */
732 rx_buf->data = new_data;
9f6c9258 733
619c5cb6 734 return;
9f6c9258 735 }
d46d132c 736 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
737drop:
738 /* drop the packet and keep the buffer in the bin */
739 DP(NETIF_MSG_RX_STATUS,
740 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 741 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
742}
743
1191cb83
ED
744static int bnx2x_alloc_rx_data(struct bnx2x *bp,
745 struct bnx2x_fastpath *fp, u16 index)
746{
747 u8 *data;
748 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
749 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
750 dma_addr_t mapping;
751
d46d132c 752 data = bnx2x_frag_alloc(fp);
1191cb83
ED
753 if (unlikely(data == NULL))
754 return -ENOMEM;
755
756 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
757 fp->rx_buf_size,
758 DMA_FROM_DEVICE);
759 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 760 bnx2x_frag_free(fp, data);
1191cb83
ED
761 BNX2X_ERR("Can't map rx data\n");
762 return -ENOMEM;
763 }
764
765 rx_buf->data = data;
766 dma_unmap_addr_set(rx_buf, mapping, mapping);
767
768 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
769 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
770
771 return 0;
772}
773
15192a8c
BW
774static
775void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
776 struct bnx2x_fastpath *fp,
777 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 778{
e488921f
MS
779 /* Do nothing if no L4 csum validation was done.
780 * We do not check whether IP csum was validated. For IPv4 we assume
781 * that if the card got as far as validating the L4 csum, it also
782 * validated the IP csum. IPv6 has no IP csum.
783 */
d6cb3e41 784 if (cqe->fast_path_cqe.status_flags &
e488921f 785 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
786 return;
787
e488921f 788 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
789
790 if (cqe->fast_path_cqe.type_error_flags &
791 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
792 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 793 qstats->hw_csum_err++;
d6cb3e41
ED
794 else
795 skb->ip_summed = CHECKSUM_UNNECESSARY;
796}
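/* Illustrative summary, not part of the original source, of the decision
 * made by bnx2x_csum_validate() above:
 *   L4_XSUM_NO_VALIDATION flag set      -> skb stays CHECKSUM_NONE
 *   validation done, IP/L4 error flags  -> qstats->hw_csum_err++, CHECKSUM_NONE
 *   validation done, no error flags     -> CHECKSUM_UNNECESSARY
 */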
797
798int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
799{
800 struct bnx2x *bp = fp->bp;
801 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
802 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
803 int rx_pkt = 0;
804
805#ifdef BNX2X_STOP_ON_ERROR
806 if (unlikely(bp->panic))
807 return 0;
808#endif
809
810 /* CQ "next element" is of the size of the regular element,
811 that's why it's ok here */
812 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
813 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
814 hw_comp_cons++;
815
816 bd_cons = fp->rx_bd_cons;
817 bd_prod = fp->rx_bd_prod;
818 bd_prod_fw = bd_prod;
819 sw_comp_cons = fp->rx_comp_cons;
820 sw_comp_prod = fp->rx_comp_prod;
821
822 /* Memory barrier necessary as speculative reads of the rx
823 * buffer can be ahead of the index in the status block
824 */
825 rmb();
826
827 DP(NETIF_MSG_RX_STATUS,
828 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
829 fp->index, hw_comp_cons, sw_comp_cons);
830
831 while (sw_comp_cons != hw_comp_cons) {
832 struct sw_rx_bd *rx_buf = NULL;
833 struct sk_buff *skb;
834 union eth_rx_cqe *cqe;
619c5cb6 835 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 836 u8 cqe_fp_flags;
619c5cb6 837 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 838 u16 len, pad, queue;
e52fcb24 839 u8 *data;
a334b5fb 840 bool l4_rxhash;
9f6c9258 841
619c5cb6
VZ
842#ifdef BNX2X_STOP_ON_ERROR
843 if (unlikely(bp->panic))
844 return 0;
845#endif
846
9f6c9258
DK
847 comp_ring_cons = RCQ_BD(sw_comp_cons);
848 bd_prod = RX_BD(bd_prod);
849 bd_cons = RX_BD(bd_cons);
850
9f6c9258 851 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
852 cqe_fp = &cqe->fast_path_cqe;
853 cqe_fp_flags = cqe_fp->type_error_flags;
854 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 855
51c1a580
MS
856 DP(NETIF_MSG_RX_STATUS,
857 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
858 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
859 cqe_fp_flags, cqe_fp->status_flags,
860 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
861 le16_to_cpu(cqe_fp->vlan_tag),
862 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
863
864 /* is this a slowpath msg? */
619c5cb6 865 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
866 bnx2x_sp_event(fp, cqe);
867 goto next_cqe;
e52fcb24 868 }
621b4d66 869
e52fcb24
ED
870 rx_buf = &fp->rx_buf_ring[bd_cons];
871 data = rx_buf->data;
9f6c9258 872
e52fcb24 873 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
874 struct bnx2x_agg_info *tpa_info;
875 u16 frag_size, pages;
619c5cb6 876#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
877 /* sanity check */
878 if (fp->disable_tpa &&
879 (CQE_TYPE_START(cqe_fp_type) ||
880 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 881 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 882 CQE_TYPE(cqe_fp_type));
619c5cb6 883#endif
9f6c9258 884
e52fcb24
ED
885 if (CQE_TYPE_START(cqe_fp_type)) {
886 u16 queue = cqe_fp->queue_index;
887 DP(NETIF_MSG_RX_STATUS,
888 "calling tpa_start on queue %d\n",
889 queue);
9f6c9258 890
e52fcb24
ED
891 bnx2x_tpa_start(fp, queue,
892 bd_cons, bd_prod,
893 cqe_fp);
621b4d66 894
e52fcb24 895 goto next_rx;
e52fcb24 896
621b4d66
DK
897 }
898 queue = cqe->end_agg_cqe.queue_index;
899 tpa_info = &fp->tpa_info[queue];
900 DP(NETIF_MSG_RX_STATUS,
901 "calling tpa_stop on queue %d\n",
902 queue);
903
904 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
905 tpa_info->len_on_bd;
906
907 if (fp->mode == TPA_MODE_GRO)
908 pages = (frag_size + tpa_info->full_page - 1) /
909 tpa_info->full_page;
910 else
911 pages = SGE_PAGE_ALIGN(frag_size) >>
912 SGE_PAGE_SHIFT;
913
914 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
915 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 916#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
917 if (bp->panic)
918 return 0;
9f6c9258
DK
919#endif
920
621b4d66
DK
921 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
922 goto next_cqe;
e52fcb24
ED
923 }
924 /* non TPA */
621b4d66 925 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
926 pad = cqe_fp->placement_offset;
927 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 928 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
929 pad + RX_COPY_THRESH,
930 DMA_FROM_DEVICE);
931 pad += NET_SKB_PAD;
932 prefetch(data + pad); /* speedup eth_type_trans() */
933 /* is this an error packet? */
934 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 935 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
936 "ERROR flags %x rx packet %u\n",
937 cqe_fp_flags, sw_comp_cons);
15192a8c 938 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
939 goto reuse_rx;
940 }
9f6c9258 941
e52fcb24
ED
942 /* Since we don't have a jumbo ring
943 * copy small packets if mtu > 1500
944 */
945 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
946 (len <= RX_COPY_THRESH)) {
947 skb = netdev_alloc_skb_ip_align(bp->dev, len);
948 if (skb == NULL) {
51c1a580 949 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 950 "ERROR packet dropped because of alloc failure\n");
15192a8c 951 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
952 goto reuse_rx;
953 }
e52fcb24
ED
954 memcpy(skb->data, data + pad, len);
955 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
956 } else {
957 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 958 dma_unmap_single(&bp->pdev->dev,
e52fcb24 959 dma_unmap_addr(rx_buf, mapping),
a8c94b91 960 fp->rx_buf_size,
9f6c9258 961 DMA_FROM_DEVICE);
d46d132c 962 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 963 if (unlikely(!skb)) {
d46d132c 964 bnx2x_frag_free(fp, data);
15192a8c
BW
965 bnx2x_fp_qstats(bp, fp)->
966 rx_skb_alloc_failed++;
e52fcb24
ED
967 goto next_rx;
968 }
9f6c9258 969 skb_reserve(skb, pad);
9f6c9258 970 } else {
51c1a580
MS
971 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
972 "ERROR packet dropped because of alloc failure\n");
15192a8c 973 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 974reuse_rx:
e52fcb24 975 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
976 goto next_rx;
977 }
036d2df9 978 }
9f6c9258 979
036d2df9
DK
980 skb_put(skb, len);
981 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 982
036d2df9 983 /* Set Toeplitz hash for a none-LRO skb */
a334b5fb
ED
984 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
985 skb->l4_rxhash = l4_rxhash;
9f6c9258 986
036d2df9 987 skb_checksum_none_assert(skb);
f85582f8 988
d6cb3e41 989 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
990 bnx2x_csum_validate(skb, cqe, fp,
991 bnx2x_fp_qstats(bp, fp));
9f6c9258 992
f233cafe 993 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 994
619c5cb6
VZ
995 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
996 PARSING_FLAGS_VLAN)
9bcc0893 997 __vlan_hwaccel_put_tag(skb,
619c5cb6 998 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 999 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
1000
1001
1002next_rx:
e52fcb24 1003 rx_buf->data = NULL;
9f6c9258
DK
1004
1005 bd_cons = NEXT_RX_IDX(bd_cons);
1006 bd_prod = NEXT_RX_IDX(bd_prod);
1007 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1008 rx_pkt++;
1009next_cqe:
1010 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1011 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1012
1013 if (rx_pkt == budget)
1014 break;
1015 } /* while */
1016
1017 fp->rx_bd_cons = bd_cons;
1018 fp->rx_bd_prod = bd_prod_fw;
1019 fp->rx_comp_cons = sw_comp_cons;
1020 fp->rx_comp_prod = sw_comp_prod;
1021
1022 /* Update producers */
1023 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1024 fp->rx_sge_prod);
1025
1026 fp->rx_pkt += rx_pkt;
1027 fp->rx_calls++;
1028
1029 return rx_pkt;
1030}
1031
1032static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1033{
1034 struct bnx2x_fastpath *fp = fp_cookie;
1035 struct bnx2x *bp = fp->bp;
6383c0b3 1036 u8 cos;
9f6c9258 1037
51c1a580
MS
1038 DP(NETIF_MSG_INTR,
1039 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
1040 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1041 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1042
1043#ifdef BNX2X_STOP_ON_ERROR
1044 if (unlikely(bp->panic))
1045 return IRQ_HANDLED;
1046#endif
1047
1048 /* Handle Rx and Tx according to MSI-X vector */
1049 prefetch(fp->rx_cons_sb);
6383c0b3
AE
1050
1051 for_each_cos_in_tx_queue(fp, cos)
65565884 1052 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1053
523224a3 1054 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1055 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1056
1057 return IRQ_HANDLED;
1058}
1059
9f6c9258
DK
1060/* HW Lock for shared dual port PHYs */
1061void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1062{
1063 mutex_lock(&bp->port.phy_mutex);
1064
8203c4b6 1065 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1066}
1067
1068void bnx2x_release_phy_lock(struct bnx2x *bp)
1069{
8203c4b6 1070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1071
1072 mutex_unlock(&bp->port.phy_mutex);
1073}
1074
0793f83f
DK
1075/* calculates MF speed according to current linespeed and MF configuration */
1076u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1077{
1078 u16 line_speed = bp->link_vars.line_speed;
1079 if (IS_MF(bp)) {
faa6fcbb
DK
1080 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1081 bp->mf_config[BP_VN(bp)]);
1082
1083 /* Calculate the current MAX line speed limit for the MF
1084 * devices
0793f83f 1085 */
faa6fcbb
DK
1086 if (IS_MF_SI(bp))
1087 line_speed = (line_speed * maxCfg) / 100;
1088 else { /* SD mode */
0793f83f
DK
1089 u16 vn_max_rate = maxCfg * 100;
1090
1091 if (vn_max_rate < line_speed)
1092 line_speed = vn_max_rate;
faa6fcbb 1093 }
0793f83f
DK
1094 }
1095
1096 return line_speed;
1097}
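/* Illustrative example with assumed numbers, not from the original source:
 * with line_speed = 10000 Mbps and maxCfg = 25,
 *   SI mode: 10000 * 25 / 100 = 2500 Mbps
 *   SD mode: vn_max_rate = 25 * 100 = 2500 Mbps, min(10000, 2500) = 2500 Mbps
 */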
1098
1099/**
1100 * bnx2x_fill_report_data - fill link report data to report
1101 *
1102 * @bp: driver handle
1103 * @data: link state to update
1104 *
1105 * It uses non-atomic bit operations because it is called under the mutex.
1106 */
1191cb83
ED
1107static void bnx2x_fill_report_data(struct bnx2x *bp,
1108 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1109{
1110 u16 line_speed = bnx2x_get_mf_speed(bp);
1111
1112 memset(data, 0, sizeof(*data));
1113
1114 /* Fill the report data: effective line speed */
1115 data->line_speed = line_speed;
1116
1117 /* Link is down */
1118 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1119 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1120 &data->link_report_flags);
1121
1122 /* Full DUPLEX */
1123 if (bp->link_vars.duplex == DUPLEX_FULL)
1124 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1125
1126 /* Rx Flow Control is ON */
1127 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1128 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1129
1130 /* Tx Flow Control is ON */
1131 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1132 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1133}
1134
1135/**
1136 * bnx2x_link_report - report link status to OS.
1137 *
1138 * @bp: driver handle
1139 *
1140 * Calls the __bnx2x_link_report() under the same locking scheme
1141 * as a link/PHY state managing code to ensure a consistent link
1142 * reporting.
1143 */
1144
9f6c9258
DK
1145void bnx2x_link_report(struct bnx2x *bp)
1146{
2ae17f66
VZ
1147 bnx2x_acquire_phy_lock(bp);
1148 __bnx2x_link_report(bp);
1149 bnx2x_release_phy_lock(bp);
1150}
9f6c9258 1151
2ae17f66
VZ
1152/**
1153 * __bnx2x_link_report - report link status to OS.
1154 *
1155 * @bp: driver handle
1156 *
1157 * Non-atomic implementation.
1158 * Should be called under the phy_lock.
1159 */
1160void __bnx2x_link_report(struct bnx2x *bp)
1161{
1162 struct bnx2x_link_report_data cur_data;
9f6c9258 1163
2ae17f66 1164 /* reread mf_cfg */
ad5afc89 1165 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1166 bnx2x_read_mf_cfg(bp);
1167
1168 /* Read the current link report info */
1169 bnx2x_fill_report_data(bp, &cur_data);
1170
1171 /* Don't report link down or exactly the same link status twice */
1172 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1173 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1174 &bp->last_reported_link.link_report_flags) &&
1175 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1176 &cur_data.link_report_flags)))
1177 return;
1178
1179 bp->link_cnt++;
9f6c9258 1180
2ae17f66
VZ
1181 /* We are going to report new link parameters now -
1182 * remember the current data for the next time.
1183 */
1184 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1185
2ae17f66
VZ
1186 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1187 &cur_data.link_report_flags)) {
1188 netif_carrier_off(bp->dev);
1189 netdev_err(bp->dev, "NIC Link is Down\n");
1190 return;
1191 } else {
94f05b0f
JP
1192 const char *duplex;
1193 const char *flow;
1194
2ae17f66 1195 netif_carrier_on(bp->dev);
9f6c9258 1196
2ae17f66
VZ
1197 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1198 &cur_data.link_report_flags))
94f05b0f 1199 duplex = "full";
9f6c9258 1200 else
94f05b0f 1201 duplex = "half";
9f6c9258 1202
2ae17f66
VZ
1203 /* Handle the FC at the end so that only these flags would be
1204 * possibly set. This way we may easily check if there is no FC
1205 * enabled.
1206 */
1207 if (cur_data.link_report_flags) {
1208 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1209 &cur_data.link_report_flags)) {
2ae17f66
VZ
1210 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1211 &cur_data.link_report_flags))
94f05b0f
JP
1212 flow = "ON - receive & transmit";
1213 else
1214 flow = "ON - receive";
9f6c9258 1215 } else {
94f05b0f 1216 flow = "ON - transmit";
9f6c9258 1217 }
94f05b0f
JP
1218 } else {
1219 flow = "none";
9f6c9258 1220 }
94f05b0f
JP
1221 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1222 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1223 }
1224}
1225
1191cb83
ED
1226static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1227{
1228 int i;
1229
1230 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1231 struct eth_rx_sge *sge;
1232
1233 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1234 sge->addr_hi =
1235 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1236 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1237
1238 sge->addr_lo =
1239 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1240 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1241 }
1242}
1243
1244static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1245 struct bnx2x_fastpath *fp, int last)
1246{
1247 int i;
1248
1249 for (i = 0; i < last; i++) {
1250 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1251 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1252 u8 *data = first_buf->data;
1253
1254 if (data == NULL) {
1255 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1256 continue;
1257 }
1258 if (tpa_info->tpa_state == BNX2X_TPA_START)
1259 dma_unmap_single(&bp->pdev->dev,
1260 dma_unmap_addr(first_buf, mapping),
1261 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1262 bnx2x_frag_free(fp, data);
1191cb83
ED
1263 first_buf->data = NULL;
1264 }
1265}
1266
55c11941
MS
1267void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1268{
1269 int j;
1270
1271 for_each_rx_queue_cnic(bp, j) {
1272 struct bnx2x_fastpath *fp = &bp->fp[j];
1273
1274 fp->rx_bd_cons = 0;
1275
1276 /* Activate BD ring */
1277 /* Warning!
1278 * this will generate an interrupt (to the TSTORM)
1279 * must only be done after chip is initialized
1280 */
1281 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1282 fp->rx_sge_prod);
1283 }
1284}
1285
9f6c9258
DK
1286void bnx2x_init_rx_rings(struct bnx2x *bp)
1287{
1288 int func = BP_FUNC(bp);
523224a3 1289 u16 ring_prod;
9f6c9258 1290 int i, j;
25141580 1291
b3b83c3f 1292 /* Allocate TPA resources */
55c11941 1293 for_each_eth_queue(bp, j) {
523224a3 1294 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1295
a8c94b91
VZ
1296 DP(NETIF_MSG_IFUP,
1297 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1298
523224a3 1299 if (!fp->disable_tpa) {
1300 /* Fill the per-aggregation pool */
dfacf138 1301 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1302 struct bnx2x_agg_info *tpa_info =
1303 &fp->tpa_info[i];
1304 struct sw_rx_bd *first_buf =
1305 &tpa_info->first_buf;
1306
d46d132c 1307 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1308 if (!first_buf->data) {
51c1a580
MS
1309 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1310 j);
9f6c9258
DK
1311 bnx2x_free_tpa_pool(bp, fp, i);
1312 fp->disable_tpa = 1;
1313 break;
1314 }
619c5cb6
VZ
1315 dma_unmap_addr_set(first_buf, mapping, 0);
1316 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1317 }
523224a3
DK
1318
1319 /* "next page" elements initialization */
1320 bnx2x_set_next_page_sgl(fp);
1321
1322 /* set SGEs bit mask */
1323 bnx2x_init_sge_ring_bit_mask(fp);
1324
1325 /* Allocate SGEs and initialize the ring elements */
1326 for (i = 0, ring_prod = 0;
1327 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1328
1329 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1330 BNX2X_ERR("was only able to allocate %d rx sges\n",
1331 i);
1332 BNX2X_ERR("disabling TPA for queue[%d]\n",
1333 j);
523224a3 1334 /* Cleanup already allocated elements */
619c5cb6
VZ
1335 bnx2x_free_rx_sge_range(bp, fp,
1336 ring_prod);
1337 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1338 MAX_AGG_QS(bp));
523224a3
DK
1339 fp->disable_tpa = 1;
1340 ring_prod = 0;
1341 break;
1342 }
1343 ring_prod = NEXT_SGE_IDX(ring_prod);
1344 }
1345
1346 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1347 }
1348 }
1349
55c11941 1350 for_each_eth_queue(bp, j) {
9f6c9258
DK
1351 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353 fp->rx_bd_cons = 0;
9f6c9258 1354
b3b83c3f
DK
1355 /* Activate BD ring */
1356 /* Warning!
1357 * this will generate an interrupt (to the TSTORM)
1358 * must only be done after chip is initialized
1359 */
1360 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361 fp->rx_sge_prod);
9f6c9258 1362
9f6c9258
DK
1363 if (j != 0)
1364 continue;
1365
619c5cb6 1366 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1367 REG_WR(bp, BAR_USTRORM_INTMEM +
1368 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1369 U64_LO(fp->rx_comp_mapping));
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1372 U64_HI(fp->rx_comp_mapping));
1373 }
9f6c9258
DK
1374 }
1375}
f85582f8 1376
55c11941 1377static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1378{
6383c0b3 1379 u8 cos;
55c11941 1380 struct bnx2x *bp = fp->bp;
9f6c9258 1381
55c11941
MS
1382 for_each_cos_in_tx_queue(fp, cos) {
1383 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1384 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1385
55c11941
MS
1386 u16 sw_prod = txdata->tx_pkt_prod;
1387 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1388
55c11941
MS
1389 while (sw_cons != sw_prod) {
1390 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1391 &pkts_compl, &bytes_compl);
1392 sw_cons++;
9f6c9258 1393 }
55c11941
MS
1394
1395 netdev_tx_reset_queue(
1396 netdev_get_tx_queue(bp->dev,
1397 txdata->txq_index));
1398 }
1399}
1400
1401static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1402{
1403 int i;
1404
1405 for_each_tx_queue_cnic(bp, i) {
1406 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1407 }
1408}
1409
1410static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1411{
1412 int i;
1413
1414 for_each_eth_queue(bp, i) {
1415 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1416 }
1417}
1418
b3b83c3f
DK
1419static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1420{
1421 struct bnx2x *bp = fp->bp;
1422 int i;
1423
1424 /* ring wasn't allocated */
1425 if (fp->rx_buf_ring == NULL)
1426 return;
1427
1428 for (i = 0; i < NUM_RX_BD; i++) {
1429 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1430 u8 *data = rx_buf->data;
b3b83c3f 1431
e52fcb24 1432 if (data == NULL)
b3b83c3f 1433 continue;
b3b83c3f
DK
1434 dma_unmap_single(&bp->pdev->dev,
1435 dma_unmap_addr(rx_buf, mapping),
1436 fp->rx_buf_size, DMA_FROM_DEVICE);
1437
e52fcb24 1438 rx_buf->data = NULL;
d46d132c 1439 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1440 }
1441}
1442
55c11941
MS
1443static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1444{
1445 int j;
1446
1447 for_each_rx_queue_cnic(bp, j) {
1448 bnx2x_free_rx_bds(&bp->fp[j]);
1449 }
1450}
1451
9f6c9258
DK
1452static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1453{
b3b83c3f 1454 int j;
9f6c9258 1455
55c11941 1456 for_each_eth_queue(bp, j) {
9f6c9258
DK
1457 struct bnx2x_fastpath *fp = &bp->fp[j];
1458
b3b83c3f 1459 bnx2x_free_rx_bds(fp);
9f6c9258 1460
9f6c9258 1461 if (!fp->disable_tpa)
dfacf138 1462 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1463 }
1464}
1465
55c11941
MS
1466void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1467{
1468 bnx2x_free_tx_skbs_cnic(bp);
1469 bnx2x_free_rx_skbs_cnic(bp);
1470}
1471
9f6c9258
DK
1472void bnx2x_free_skbs(struct bnx2x *bp)
1473{
1474 bnx2x_free_tx_skbs(bp);
1475 bnx2x_free_rx_skbs(bp);
1476}
1477
e3835b99
DK
1478void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1479{
1480 /* load old values */
1481 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1482
1483 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1484 /* leave all but MAX value */
1485 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1486
1487 /* set new MAX value */
1488 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1489 & FUNC_MF_CFG_MAX_BW_MASK;
1490
1491 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1492 }
1493}
1494
ca92429f
DK
1495/**
1496 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1497 *
1498 * @bp: driver handle
1499 * @nvecs: number of vectors to be released
1500 */
1501static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1502{
ca92429f 1503 int i, offset = 0;
9f6c9258 1504
ca92429f
DK
1505 if (nvecs == offset)
1506 return;
ad5afc89
AE
1507
1508 /* VFs don't have a default SB */
1509 if (IS_PF(bp)) {
1510 free_irq(bp->msix_table[offset].vector, bp->dev);
1511 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1512 bp->msix_table[offset].vector);
1513 offset++;
1514 }
55c11941
MS
1515
1516 if (CNIC_SUPPORT(bp)) {
1517 if (nvecs == offset)
1518 return;
1519 offset++;
1520 }
ca92429f 1521
ec6ba945 1522 for_each_eth_queue(bp, i) {
ca92429f
DK
1523 if (nvecs == offset)
1524 return;
51c1a580
MS
1525 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1526 i, bp->msix_table[offset].vector);
9f6c9258 1527
ca92429f 1528 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1529 }
1530}
1531
d6214d7a 1532void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1533{
30a5de77 1534 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1535 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1536 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1537
1538 /* vfs don't have a default status block */
1539 if (IS_PF(bp))
1540 nvecs++;
1541
1542 bnx2x_free_msix_irqs(bp, nvecs);
1543 } else {
30a5de77 1544 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1545 }
9f6c9258
DK
1546}
1547
0e8d2ec5 1548int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1549{
1ab4434c 1550 int msix_vec = 0, i, rc;
9f6c9258 1551
1ab4434c
AE
1552 /* VFs don't have a default status block */
1553 if (IS_PF(bp)) {
1554 bp->msix_table[msix_vec].entry = msix_vec;
1555 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1556 bp->msix_table[0].entry);
1557 msix_vec++;
1558 }
9f6c9258 1559
55c11941
MS
1560 /* Cnic requires an msix vector for itself */
1561 if (CNIC_SUPPORT(bp)) {
1562 bp->msix_table[msix_vec].entry = msix_vec;
1563 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1564 msix_vec, bp->msix_table[msix_vec].entry);
1565 msix_vec++;
1566 }
1567
6383c0b3 1568 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1569 for_each_eth_queue(bp, i) {
d6214d7a 1570 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1571 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1572 msix_vec, msix_vec, i);
d6214d7a 1573 msix_vec++;
9f6c9258
DK
1574 }
1575
1ab4434c
AE
1576 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1577 msix_vec);
d6214d7a 1578
1ab4434c 1579 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1580
1581 /*
1582 * reconfigure number of tx/rx queues according to available
1583 * MSI-X vectors
1584 */
55c11941 1585 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1586 /* how less vectors we will have? */
1ab4434c 1587 int diff = msix_vec - rc;
9f6c9258 1588
51c1a580 1589 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1590
1591 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1592
1593 if (rc) {
30a5de77
DK
1594 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1595 goto no_msix;
9f6c9258 1596 }
d6214d7a
DK
1597 /*
1598 * decrease number of queues by number of unallocated entries
1599 */
55c11941
MS
1600 bp->num_ethernet_queues -= diff;
1601 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1602
51c1a580 1603 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1604 bp->num_queues);
1605 } else if (rc > 0) {
1606 /* Get by with single vector */
1607 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1608 if (rc) {
1609 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1610 rc);
1611 goto no_msix;
1612 }
1613
1614 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1615 bp->flags |= USING_SINGLE_MSIX_FLAG;
1616
55c11941
MS
1617 BNX2X_DEV_INFO("set number of queues to 1\n");
1618 bp->num_ethernet_queues = 1;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1620 } else if (rc < 0) {
51c1a580 1621 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1622 goto no_msix;
9f6c9258
DK
1623 }
1624
1625 bp->flags |= USING_MSIX_FLAG;
1626
1627 return 0;
30a5de77
DK
1628
1629no_msix:
1630 /* fall back to INTx if not enough memory */
1631 if (rc == -ENOMEM)
1632 bp->flags |= DISABLE_MSI_FLAG;
1633
1634 return rc;
9f6c9258
DK
1635}
1636
1637static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1638{
ca92429f 1639 int i, rc, offset = 0;
9f6c9258 1640
ad5afc89
AE
1641 /* no default status block for vf */
1642 if (IS_PF(bp)) {
1643 rc = request_irq(bp->msix_table[offset++].vector,
1644 bnx2x_msix_sp_int, 0,
1645 bp->dev->name, bp->dev);
1646 if (rc) {
1647 BNX2X_ERR("request sp irq failed\n");
1648 return -EBUSY;
1649 }
9f6c9258
DK
1650 }
1651
55c11941
MS
1652 if (CNIC_SUPPORT(bp))
1653 offset++;
1654
ec6ba945 1655 for_each_eth_queue(bp, i) {
9f6c9258
DK
1656 struct bnx2x_fastpath *fp = &bp->fp[i];
1657 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1658 bp->dev->name, i);
1659
d6214d7a 1660 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1661 bnx2x_msix_fp_int, 0, fp->name, fp);
1662 if (rc) {
ca92429f
DK
1663 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1664 bp->msix_table[offset].vector, rc);
1665 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1666 return -EBUSY;
1667 }
1668
d6214d7a 1669 offset++;
9f6c9258
DK
1670 }
1671
ec6ba945 1672 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1673 if (IS_PF(bp)) {
1674 offset = 1 + CNIC_SUPPORT(bp);
1675 netdev_info(bp->dev,
1676 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1677 bp->msix_table[0].vector,
1678 0, bp->msix_table[offset].vector,
1679 i - 1, bp->msix_table[offset + i - 1].vector);
1680 } else {
1681 offset = CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1684 0, bp->msix_table[offset].vector,
1685 i - 1, bp->msix_table[offset + i - 1].vector);
1686 }
9f6c9258
DK
1687 return 0;
1688}
1689
d6214d7a 1690int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1691{
1692 int rc;
1693
1694 rc = pci_enable_msi(bp->pdev);
1695 if (rc) {
51c1a580 1696 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1697 return -1;
1698 }
1699 bp->flags |= USING_MSI_FLAG;
1700
1701 return 0;
1702}
1703
1704static int bnx2x_req_irq(struct bnx2x *bp)
1705{
1706 unsigned long flags;
30a5de77 1707 unsigned int irq;
9f6c9258 1708
30a5de77 1709 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1710 flags = 0;
1711 else
1712 flags = IRQF_SHARED;
1713
30a5de77
DK
1714 if (bp->flags & USING_MSIX_FLAG)
1715 irq = bp->msix_table[0].vector;
1716 else
1717 irq = bp->pdev->irq;
1718
1719 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1720}
1721
1191cb83 1722static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1723{
1724 int rc = 0;
30a5de77
DK
1725 if (bp->flags & USING_MSIX_FLAG &&
1726 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1727 rc = bnx2x_req_msix_irqs(bp);
1728 if (rc)
1729 return rc;
1730 } else {
619c5cb6
VZ
1731 rc = bnx2x_req_irq(bp);
1732 if (rc) {
1733 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1734 return rc;
1735 }
1736 if (bp->flags & USING_MSI_FLAG) {
1737 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1738 netdev_info(bp->dev, "using MSI IRQ %d\n",
1739 bp->dev->irq);
1740 }
1741 if (bp->flags & USING_MSIX_FLAG) {
1742 bp->dev->irq = bp->msix_table[0].vector;
1743 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1744 bp->dev->irq);
619c5cb6
VZ
1745 }
1746 }
1747
1748 return 0;
1749}
1750
55c11941
MS
1751static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1752{
1753 int i;
1754
1755 for_each_rx_queue_cnic(bp, i)
1756 napi_enable(&bnx2x_fp(bp, i, napi));
1757}
1758
1191cb83 1759static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1760{
1761 int i;
1762
55c11941 1763 for_each_eth_queue(bp, i)
9f6c9258
DK
1764 napi_enable(&bnx2x_fp(bp, i, napi));
1765}
1766
55c11941
MS
1767static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1768{
1769 int i;
1770
1771 for_each_rx_queue_cnic(bp, i)
1772 napi_disable(&bnx2x_fp(bp, i, napi));
1773}
1774
1191cb83 1775static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1776{
1777 int i;
1778
55c11941 1779 for_each_eth_queue(bp, i)
9f6c9258
DK
1780 napi_disable(&bnx2x_fp(bp, i, napi));
1781}
1782
1783void bnx2x_netif_start(struct bnx2x *bp)
1784{
4b7ed897
DK
1785 if (netif_running(bp->dev)) {
1786 bnx2x_napi_enable(bp);
55c11941
MS
1787 if (CNIC_LOADED(bp))
1788 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1789 bnx2x_int_enable(bp);
1790 if (bp->state == BNX2X_STATE_OPEN)
1791 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1792 }
1793}
1794
1795void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1796{
1797 bnx2x_int_disable_sync(bp, disable_hw);
1798 bnx2x_napi_disable(bp);
55c11941
MS
1799 if (CNIC_LOADED(bp))
1800 bnx2x_napi_disable_cnic(bp);
9f6c9258 1801}
9f6c9258 1802
8307fa3e
VZ
1803u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1804{
8307fa3e 1805 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1806
55c11941 1807 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1808 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1809 u16 ether_type = ntohs(hdr->h_proto);
1810
1811 /* Skip VLAN tag if present */
1812 if (ether_type == ETH_P_8021Q) {
1813 struct vlan_ethhdr *vhdr =
1814 (struct vlan_ethhdr *)skb->data;
1815
1816 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1817 }
1818
1819 /* If ethertype is FCoE or FIP - use FCoE ring */
1820 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1821 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1822 }
55c11941 1823
cdb9d6ae 1824 /* select a non-FCoE queue */
6383c0b3 1825 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1826}
1827
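/* Illustrative sketch only - a hypothetical standalone helper, not part of
 * the driver - showing the same "peel one 802.1Q tag, then test the
 * ethertype" walk that bnx2x_select_queue() above performs before steering
 * a frame to the FCoE ring.
 */
static inline u16 bnx2x_example_outer_ethertype(const struct sk_buff *skb)
{
	const struct ethhdr *hdr = (const struct ethhdr *)skb->data;
	u16 ether_type = ntohs(hdr->h_proto);

	/* Skip a single VLAN tag if present, exactly as above */
	if (ether_type == ETH_P_8021Q) {
		const struct vlan_ethhdr *vhdr =
			(const struct vlan_ethhdr *)skb->data;

		ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
	}
	return ether_type;
}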
d6214d7a
DK
1828void bnx2x_set_num_queues(struct bnx2x *bp)
1829{
96305234 1830 /* RSS queues */
55c11941 1831 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1832
a3348722
BW
1833 /* override in STORAGE SD modes */
1834 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1835 bp->num_ethernet_queues = 1;
1836
ec6ba945 1837 /* Add special queues */
55c11941
MS
1838 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1839 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1840
1841 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1842}
1843
cdb9d6ae
VZ
1844/**
1845 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1846 *
1847 * @bp: Driver handle
1848 *
 1849 * We currently support at most 16 Tx queues for each CoS, thus we will
1850 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1851 * bp->max_cos.
1852 *
1853 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1854 * index after all ETH L2 indices.
1855 *
1856 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1857 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1858 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1859 *
1860 * The proper configuration of skb->queue_mapping is handled by
1861 * bnx2x_select_queue() and __skb_tx_hash().
1862 *
1863 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1864 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1865 */
55c11941 1866static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1867{
6383c0b3 1868 int rc, tx, rx;
ec6ba945 1869
65565884 1870 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1871 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1872
6383c0b3 1873/* account for fcoe queue */
55c11941
MS
1874 if (include_cnic && !NO_FCOE(bp)) {
1875 rx++;
1876 tx++;
6383c0b3 1877 }
6383c0b3
AE
1878
1879 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1880 if (rc) {
1881 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1882 return rc;
1883 }
1884 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1885 if (rc) {
1886 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1887 return rc;
1888 }
1889
51c1a580 1890 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1891 tx, rx);
1892
ec6ba945
VZ
1893 return rc;
1894}
1895
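/* Worked example (assumed values, for illustration only): with 8 ETH RSS
 * queues, bp->max_cos = 3 and an active FCoE ring, the calls above request
 * tx = 8 * 3 + 1 = 25 real Tx queues and rx = 8 + 1 = 9 real Rx queues.
 */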
1191cb83 1896static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1897{
1898 int i;
1899
1900 for_each_queue(bp, i) {
1901 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1902 u32 mtu;
a8c94b91
VZ
1903
1904 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1905 if (IS_FCOE_IDX(i))
1906 /*
 1907 * Although there are no IP frames expected to arrive on
 1908 * this ring, we still want to add an
1909 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1910 * overrun attack.
1911 */
e52fcb24 1912 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1913 else
e52fcb24
ED
1914 mtu = bp->dev->mtu;
1915 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1916 IP_HEADER_ALIGNMENT_PADDING +
1917 ETH_OVREHEAD +
1918 mtu +
1919 BNX2X_FW_RX_ALIGN_END;
 1920 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1921 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1922 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1923 else
1924 fp->rx_frag_size = 0;
a8c94b91
VZ
1925 }
1926}
1927
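/* Worked example (assumed values, for illustration only): for a standard
 * 1500-byte MTU the sum above (FW alignment start/end, IP header alignment
 * padding, Ethernet overhead and the MTU itself) stays well below one 4K
 * page even with NET_SKB_PAD added, so rx_frag_size is set and Rx buffers
 * can be carved out of page fragments; a 9000-byte jumbo MTU overflows a 4K
 * PAGE_SIZE, so rx_frag_size stays 0 and regular allocation is used.
 */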
1191cb83 1928static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1929{
1930 int i;
619c5cb6
VZ
1931 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1932
96305234 1933 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1934 * enabled
1935 */
5d317c6a
MS
1936 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1937 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1938 bp->fp->cl_id +
1939 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1940
1941 /*
1942 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1943 * per-port, so if explicit configuration is needed, do it only
1944 * for a PMF.
1945 *
1946 * For 57712 and newer on the other hand it's a per-function
1947 * configuration.
1948 */
5d317c6a 1949 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1950}
1951
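/* Worked example (illustration): ethtool_rxfh_indir_default(i, n) spreads
 * entries as i % n, so with 4 ETH queues the indirection table filled above
 * (128 entries assumed) becomes cl_id+0, cl_id+1, cl_id+2, cl_id+3,
 * cl_id+0, ... - i.e. flows are distributed round-robin over the client IDs
 * of the ETH fastpaths.
 */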
96305234 1952int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1953 bool config_hash)
619c5cb6 1954{
3b603066 1955 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1956
1957 /* Although RSS is meaningless when there is a single HW queue we
1958 * still need it enabled in order to have HW Rx hash generated.
1959 *
1960 * if (!is_eth_multi(bp))
1961 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1962 */
1963
96305234 1964 params.rss_obj = rss_obj;
619c5cb6
VZ
1965
1966 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1967
96305234 1968 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1969
96305234
DK
1970 /* RSS configuration */
1971 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1972 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1975 if (rss_obj->udp_rss_v4)
1976 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1977 if (rss_obj->udp_rss_v6)
1978 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1979
96305234
DK
1980 /* Hash bits */
1981 params.rss_result_mask = MULTI_MASK;
619c5cb6 1982
5d317c6a 1983 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1984
96305234
DK
1985 if (config_hash) {
1986 /* RSS keys */
8376d0bc 1987 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1988 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1989 }
1990
1991 return bnx2x_config_rss(bp, &params);
1992}
1993
1191cb83 1994static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1995{
3b603066 1996 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1997
1998 /* Prepare parameters for function state transitions */
1999 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2000
2001 func_params.f_obj = &bp->func_obj;
2002 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2003
2004 func_params.params.hw_init.load_phase = load_code;
2005
2006 return bnx2x_func_state_change(bp, &func_params);
2007}
2008
2009/*
 2010 * Cleans the objects that have internal lists without sending
 2011 * ramrods. Should be run when interrupts are disabled.
2012 */
7fa6f340 2013void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2014{
2015 int rc;
2016 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2017 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2018 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2019
2020 /***************** Cleanup MACs' object first *************************/
2021
 2022 /* Wait for completion of the requested commands */
2023 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2024 /* Perform a dry cleanup */
2025 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2026
2027 /* Clean ETH primary MAC */
2028 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2029 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2030 &ramrod_flags);
2031 if (rc != 0)
2032 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2033
2034 /* Cleanup UC list */
2035 vlan_mac_flags = 0;
2036 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2037 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2038 &ramrod_flags);
2039 if (rc != 0)
2040 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2041
2042 /***************** Now clean mcast object *****************************/
2043 rparam.mcast_obj = &bp->mcast_obj;
2044 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2045
2046 /* Add a DEL command... */
2047 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2048 if (rc < 0)
51c1a580
MS
2049 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2050 rc);
619c5cb6
VZ
2051
2052 /* ...and wait until all pending commands are cleared */
2053 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2054 while (rc != 0) {
2055 if (rc < 0) {
2056 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2057 rc);
2058 return;
2059 }
2060
2061 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2062 }
2063}
2064
2065#ifndef BNX2X_STOP_ON_ERROR
2066#define LOAD_ERROR_EXIT(bp, label) \
2067 do { \
2068 (bp)->state = BNX2X_STATE_ERROR; \
2069 goto label; \
2070 } while (0)
55c11941
MS
2071
2072#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2073 do { \
2074 bp->cnic_loaded = false; \
2075 goto label; \
2076 } while (0)
2077#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2078#define LOAD_ERROR_EXIT(bp, label) \
2079 do { \
2080 (bp)->state = BNX2X_STATE_ERROR; \
2081 (bp)->panic = 1; \
2082 return -EBUSY; \
2083 } while (0)
55c11941
MS
2084#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2085 do { \
2086 bp->cnic_loaded = false; \
2087 (bp)->panic = 1; \
2088 return -EBUSY; \
2089 } while (0)
2090#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2091
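/* Usage pattern (illustration only): a failing step in the load flow below
 * typically reads
 *
 *	rc = some_setup_step(bp);		// hypothetical step
 *	if (rc)
 *		LOAD_ERROR_EXIT(bp, load_error2);
 *
 * so a production build marks BNX2X_STATE_ERROR and jumps to the matching
 * unwind label, while a BNX2X_STOP_ON_ERROR build sets bp->panic and
 * returns -EBUSY immediately instead of unwinding.
 */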
ad5afc89
AE
2092static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2093{
2094 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2095 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2096 return;
2097}
2098
2099static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2100{
8db573ba 2101 int num_groups, vf_headroom = 0;
ad5afc89 2102 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2103
ad5afc89
AE
2104 /* number of queues for statistics is number of eth queues + FCoE */
2105 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2106
ad5afc89
AE
2107 /* Total number of FW statistics requests =
2108 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2109 * and fcoe l2 queue) stats + num of queues (which includes another 1
2110 * for fcoe l2 queue if applicable)
2111 */
2112 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2113
8db573ba
AE
2114 /* vf stats appear in the request list, but their data is allocated by
2115 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2116 * it is used to determine where to place the vf stats queries in the
2117 * request struct
2118 */
2119 if (IS_SRIOV(bp))
6411280a 2120 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2121
ad5afc89
AE
2122 /* Request is built from stats_query_header and an array of
 2123 * stats_query_cmd_group, each of which contains
 2124 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2125 * configured in the stats_query_header.
2126 */
2127 num_groups =
8db573ba
AE
2128 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2129 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2130 1 : 0));
2131
8db573ba
AE
2132 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2133 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2134 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2135 num_groups * sizeof(struct stats_query_cmd_group);
2136
2137 /* Data for statistics requests + stats_counter
2138 * stats_counter holds per-STORM counters that are incremented
2139 * when STORM has finished with the current request.
 2140 * Memory for FCoE offloaded statistics is counted anyway,
2141 * even if they will not be sent.
2142 * VF stats are not accounted for here as the data of VF stats is stored
2143 * in memory allocated by the VF, not here.
2144 */
2145 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2146 sizeof(struct per_pf_stats) +
2147 sizeof(struct fcoe_statistics_params) +
2148 sizeof(struct per_queue_stats) * num_queue_stats +
2149 sizeof(struct stats_counter);
2150
2151 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2152 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2153
2154 /* Set shortcuts */
2155 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2156 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2157 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2158 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2159 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2160 bp->fw_stats_req_sz;
2161
2162 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2163 U64_HI(bp->fw_stats_req_mapping),
2164 U64_LO(bp->fw_stats_req_mapping));
2165 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2166 U64_HI(bp->fw_stats_data_mapping),
2167 U64_LO(bp->fw_stats_data_mapping));
2168 return 0;
2169
2170alloc_mem_err:
2171 bnx2x_free_fw_stats_mem(bp);
2172 BNX2X_ERR("Can't allocate FW stats memory\n");
2173 return -ENOMEM;
2174}
2175
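/* Worked example (assumed values): with 8 ETH queues, FCoE supported and no
 * VFs, num_queue_stats = 8 + 1 = 9 and fw_stats_num = 2 + 1 + 9 = 12, so
 * num_groups = 12 / STATS_QUERY_CMD_COUNT + (12 % STATS_QUERY_CMD_COUNT ? 1 : 0)
 * - a single command group whenever STATS_QUERY_CMD_COUNT is 12 or more.
 */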
2176/* send load request to mcp and analyze response */
2177static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2178{
2179 /* init fw_seq */
2180 bp->fw_seq =
2181 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2182 DRV_MSG_SEQ_NUMBER_MASK);
2183 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2184
2185 /* Get current FW pulse sequence */
2186 bp->fw_drv_pulse_wr_seq =
2187 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2188 DRV_PULSE_SEQ_MASK);
2189 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2190
2191 /* load request */
2192 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2193 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2194
2195 /* if mcp fails to respond we must abort */
2196 if (!(*load_code)) {
2197 BNX2X_ERR("MCP response failure, aborting\n");
2198 return -EBUSY;
2199 }
2200
2201 /* If mcp refused (e.g. other port is in diagnostic mode) we
2202 * must abort
2203 */
2204 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2205 BNX2X_ERR("MCP refused load request, aborting\n");
2206 return -EBUSY;
2207 }
2208 return 0;
2209}
2210
2211/* check whether another PF has already loaded FW to chip. In
2212 * virtualized environments a pf from another VM may have already
2213 * initialized the device including loading FW
2214 */
2215int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2216{
2217 /* is another pf loaded on this engine? */
2218 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2219 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2220 /* build my FW version dword */
2221 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2222 (BCM_5710_FW_MINOR_VERSION << 8) +
2223 (BCM_5710_FW_REVISION_VERSION << 16) +
2224 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2225
2226 /* read loaded FW from chip */
2227 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2228
2229 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2230 loaded_fw, my_fw);
2231
2232 /* abort nic load if version mismatch */
2233 if (my_fw != loaded_fw) {
2234 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2235 loaded_fw, my_fw);
ad5afc89
AE
2236 return -EBUSY;
2237 }
2238 }
2239 return 0;
2240}
2241
2242/* returns the "mcp load_code" according to global load_count array */
2243static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2244{
2245 int path = BP_PATH(bp);
2246
2247 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2248 path, load_count[path][0], load_count[path][1],
2249 load_count[path][2]);
2250 load_count[path][0]++;
2251 load_count[path][1 + port]++;
2252 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2253 path, load_count[path][0], load_count[path][1],
2254 load_count[path][2]);
2255 if (load_count[path][0] == 1)
2256 return FW_MSG_CODE_DRV_LOAD_COMMON;
2257 else if (load_count[path][1 + port] == 1)
2258 return FW_MSG_CODE_DRV_LOAD_PORT;
2259 else
2260 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2261}
2262
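/* Worked example (illustration): with no MCP, the first function to load on
 * a path moves load_count[path] from {0, 0, 0} to {1, 1, 0} and gets
 * LOAD_COMMON; a second function on the other port sees {2, 1, 1} and gets
 * LOAD_PORT; any later function on an already-initialized port gets
 * LOAD_FUNCTION.
 */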
2263/* mark PMF if applicable */
2264static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2265{
2266 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2267 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2269 bp->port.pmf = 1;
2270 /* We need the barrier to ensure the ordering between the
2271 * writing to bp->port.pmf here and reading it from the
2272 * bnx2x_periodic_task().
2273 */
2274 smp_mb();
2275 } else {
2276 bp->port.pmf = 0;
452427b0
YM
2277 }
2278
ad5afc89
AE
2279 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2280}
2281
2282static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2283{
2284 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2285 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2286 (bp->common.shmem2_base)) {
2287 if (SHMEM2_HAS(bp, dcc_support))
2288 SHMEM2_WR(bp, dcc_support,
2289 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2290 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2291 if (SHMEM2_HAS(bp, afex_driver_support))
2292 SHMEM2_WR(bp, afex_driver_support,
2293 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2294 }
2295
2296 /* Set AFEX default VLAN tag to an invalid value */
2297 bp->afex_def_vlan_tag = -1;
452427b0
YM
2298}
2299
1191cb83
ED
2300/**
2301 * bnx2x_bz_fp - zero content of the fastpath structure.
2302 *
2303 * @bp: driver handle
2304 * @index: fastpath index to be zeroed
2305 *
 2306 * Makes sure the contents of the bp->fp[index].napi are kept
2307 * intact.
2308 */
2309static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2310{
2311 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c 2312
65565884 2313 int cos;
1191cb83 2314 struct napi_struct orig_napi = fp->napi;
15192a8c 2315 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2316 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2317 if (fp->tpa_info)
2318 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2319 sizeof(struct bnx2x_agg_info));
2320 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2321
2322 /* Restore the NAPI object as it has been already initialized */
2323 fp->napi = orig_napi;
15192a8c 2324 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2325 fp->bp = bp;
2326 fp->index = index;
2327 if (IS_ETH_FP(fp))
2328 fp->max_cos = bp->max_cos;
2329 else
2330 /* Special queues support only one CoS */
2331 fp->max_cos = 1;
2332
65565884 2333 /* Init txdata pointers */
65565884
MS
2334 if (IS_FCOE_FP(fp))
2335 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2336 if (IS_ETH_FP(fp))
2337 for_each_cos_in_tx_queue(fp, cos)
2338 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2339 BNX2X_NUM_ETH_QUEUES(bp) + index];
2340
1191cb83
ED
2341 /*
 2342 * set the tpa flag for each queue. The tpa flag determines the queue's
 2343 * minimal size, so it must be set prior to queue memory allocation
2344 */
2345 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2346 (bp->flags & GRO_ENABLE_FLAG &&
2347 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2348 if (bp->flags & TPA_ENABLE_FLAG)
2349 fp->mode = TPA_MODE_LRO;
2350 else if (bp->flags & GRO_ENABLE_FLAG)
2351 fp->mode = TPA_MODE_GRO;
2352
1191cb83
ED
2353 /* We don't want TPA on an FCoE L2 ring */
2354 if (IS_FCOE_FP(fp))
2355 fp->disable_tpa = 1;
55c11941
MS
2356}
2357
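/* Worked example (assumed values): with 8 ETH queues and bp->max_cos = 3,
 * ETH fastpath index 2 gets txdata_ptr[0..2] pointing at bnx2x_txq[2],
 * bnx2x_txq[10] and bnx2x_txq[18] - the per-CoS Tx rings are laid out as
 * max_cos consecutive groups of BNX2X_NUM_ETH_QUEUES entries.
 */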
2358int bnx2x_load_cnic(struct bnx2x *bp)
2359{
2360 int i, rc, port = BP_PORT(bp);
2361
2362 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2363
2364 mutex_init(&bp->cnic_mutex);
2365
ad5afc89
AE
2366 if (IS_PF(bp)) {
2367 rc = bnx2x_alloc_mem_cnic(bp);
2368 if (rc) {
2369 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2370 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2371 }
55c11941
MS
2372 }
2373
2374 rc = bnx2x_alloc_fp_mem_cnic(bp);
2375 if (rc) {
2376 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2377 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2378 }
2379
2380 /* Update the number of queues with the cnic queues */
2381 rc = bnx2x_set_real_num_queues(bp, 1);
2382 if (rc) {
2383 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2384 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2385 }
2386
2387 /* Add all CNIC NAPI objects */
2388 bnx2x_add_all_napi_cnic(bp);
2389 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2390 bnx2x_napi_enable_cnic(bp);
2391
2392 rc = bnx2x_init_hw_func_cnic(bp);
2393 if (rc)
2394 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2395
2396 bnx2x_nic_init_cnic(bp);
2397
ad5afc89
AE
2398 if (IS_PF(bp)) {
2399 /* Enable Timer scan */
2400 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2401
2402 /* setup cnic queues */
2403 for_each_cnic_queue(bp, i) {
2404 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2405 if (rc) {
2406 BNX2X_ERR("Queue setup failed\n");
2407 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2408 }
55c11941
MS
2409 }
2410 }
2411
2412 /* Initialize Rx filter. */
2413 netif_addr_lock_bh(bp->dev);
2414 bnx2x_set_rx_mode(bp->dev);
2415 netif_addr_unlock_bh(bp->dev);
2416
2417 /* re-read iscsi info */
2418 bnx2x_get_iscsi_info(bp);
2419 bnx2x_setup_cnic_irq_info(bp);
2420 bnx2x_setup_cnic_info(bp);
2421 bp->cnic_loaded = true;
2422 if (bp->state == BNX2X_STATE_OPEN)
2423 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2424
2425
2426 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2427
2428 return 0;
2429
2430#ifndef BNX2X_STOP_ON_ERROR
2431load_error_cnic2:
2432 /* Disable Timer scan */
2433 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2434
2435load_error_cnic1:
2436 bnx2x_napi_disable_cnic(bp);
2437 /* Update the number of queues without the cnic queues */
2438 rc = bnx2x_set_real_num_queues(bp, 0);
2439 if (rc)
2440 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2441load_error_cnic0:
2442 BNX2X_ERR("CNIC-related load failed\n");
2443 bnx2x_free_fp_mem_cnic(bp);
2444 bnx2x_free_mem_cnic(bp);
2445 return rc;
2446#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2447}
2448
9f6c9258
DK
2449/* must be called with rtnl_lock */
2450int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2451{
619c5cb6 2452 int port = BP_PORT(bp);
ad5afc89 2453 int i, rc = 0, load_code = 0;
9f6c9258 2454
55c11941
MS
2455 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2456 DP(NETIF_MSG_IFUP,
2457 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2458
9f6c9258 2459#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2460 if (unlikely(bp->panic)) {
2461 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2462 return -EPERM;
51c1a580 2463 }
9f6c9258
DK
2464#endif
2465
2466 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2467
2ae17f66
VZ
2468 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2469 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2470 &bp->last_reported_link.link_report_flags);
2ae17f66 2471
ad5afc89
AE
2472 if (IS_PF(bp))
2473 /* must be called before memory allocation and HW init */
2474 bnx2x_ilt_set_info(bp);
523224a3 2475
6383c0b3
AE
2476 /*
2477 * Zero fastpath structures preserving invariants like napi, which are
2478 * allocated only once, fp index, max_cos, bp pointer.
65565884 2479 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2480 */
51c1a580 2481 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2482 for_each_queue(bp, i)
2483 bnx2x_bz_fp(bp, i);
55c11941
MS
2484 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2485 bp->num_cnic_queues) *
2486 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2487
55c11941 2488 bp->fcoe_init = false;
6383c0b3 2489
a8c94b91
VZ
2490 /* Set the receive queues buffer size */
2491 bnx2x_set_rx_buf_size(bp);
2492
ad5afc89
AE
2493 if (IS_PF(bp)) {
2494 rc = bnx2x_alloc_mem(bp);
2495 if (rc) {
2496 BNX2X_ERR("Unable to allocate bp memory\n");
2497 return rc;
2498 }
2499 }
2500
 2501 /* Allocate memory for FW statistics */
2502 if (bnx2x_alloc_fw_stats_mem(bp))
2503 LOAD_ERROR_EXIT(bp, load_error0);
2504
 2505 /* needs to be done after alloc mem, since it's self-adjusting to the amount
 2506 * of memory available for RSS queues
2507 */
2508 rc = bnx2x_alloc_fp_mem(bp);
2509 if (rc) {
2510 BNX2X_ERR("Unable to allocate memory for fps\n");
2511 LOAD_ERROR_EXIT(bp, load_error0);
2512 }
d6214d7a 2513
8d9ac297
AE
2514 /* request pf to initialize status blocks */
2515 if (IS_VF(bp)) {
2516 rc = bnx2x_vfpf_init(bp);
2517 if (rc)
2518 LOAD_ERROR_EXIT(bp, load_error0);
2519 }
2520
b3b83c3f
DK
2521 /* As long as bnx2x_alloc_mem() may possibly update
2522 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2523 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2524 */
55c11941 2525 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2526 if (rc) {
ec6ba945 2527 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2528 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2529 }
2530
6383c0b3
AE
 2531 /* configure multi cos mappings in the kernel.
 2532 * this configuration may be overridden by a multi-class queue discipline
2533 * or by a dcbx negotiation result.
2534 */
2535 bnx2x_setup_tc(bp->dev, bp->max_cos);
2536
26614ba5
MS
2537 /* Add all NAPI objects */
2538 bnx2x_add_all_napi(bp);
55c11941 2539 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2540 bnx2x_napi_enable(bp);
2541
ad5afc89
AE
2542 if (IS_PF(bp)) {
2543 /* set pf load just before approaching the MCP */
2544 bnx2x_set_pf_load(bp);
2545
2546 /* if mcp exists send load request and analyze response */
2547 if (!BP_NOMCP(bp)) {
2548 /* attempt to load pf */
2549 rc = bnx2x_nic_load_request(bp, &load_code);
2550 if (rc)
2551 LOAD_ERROR_EXIT(bp, load_error1);
2552
2553 /* what did mcp say? */
2554 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2555 if (rc) {
2556 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2557 LOAD_ERROR_EXIT(bp, load_error2);
2558 }
ad5afc89
AE
2559 } else {
2560 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2561 }
9f6c9258 2562
ad5afc89
AE
2563 /* mark pmf if applicable */
2564 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2565
ad5afc89
AE
2566 /* Init Function state controlling object */
2567 bnx2x__init_func_obj(bp);
6383c0b3 2568
ad5afc89
AE
2569 /* Initialize HW */
2570 rc = bnx2x_init_hw(bp, load_code);
2571 if (rc) {
2572 BNX2X_ERR("HW init failed, aborting\n");
2573 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2574 LOAD_ERROR_EXIT(bp, load_error2);
2575 }
9f6c9258
DK
2576 }
2577
d6214d7a
DK
2578 /* Connect to IRQs */
2579 rc = bnx2x_setup_irqs(bp);
523224a3 2580 if (rc) {
ad5afc89
AE
2581 BNX2X_ERR("setup irqs failed\n");
2582 if (IS_PF(bp))
2583 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2584 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2585 }
2586
9f6c9258
DK
2587 /* Setup NIC internals and enable interrupts */
2588 bnx2x_nic_init(bp, load_code);
2589
619c5cb6 2590 /* Init per-function objects */
ad5afc89
AE
2591 if (IS_PF(bp)) {
2592 bnx2x_init_bp_objs(bp);
b56e9670 2593 bnx2x_iov_nic_init(bp);
a3348722 2594
ad5afc89
AE
2595 /* Set AFEX default VLAN tag to an invalid value */
2596 bp->afex_def_vlan_tag = -1;
2597 bnx2x_nic_load_afex_dcc(bp, load_code);
2598 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2599 rc = bnx2x_func_start(bp);
2600 if (rc) {
2601 BNX2X_ERR("Function start failed!\n");
2602 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2603
619c5cb6 2604 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2605 }
9f6c9258 2606
ad5afc89
AE
2607 /* Send LOAD_DONE command to MCP */
2608 if (!BP_NOMCP(bp)) {
2609 load_code = bnx2x_fw_command(bp,
2610 DRV_MSG_CODE_LOAD_DONE, 0);
2611 if (!load_code) {
2612 BNX2X_ERR("MCP response failure, aborting\n");
2613 rc = -EBUSY;
2614 LOAD_ERROR_EXIT(bp, load_error3);
2615 }
2616 }
9f6c9258 2617
ad5afc89
AE
2618 /* setup the leading queue */
2619 rc = bnx2x_setup_leading(bp);
51c1a580 2620 if (rc) {
ad5afc89 2621 BNX2X_ERR("Setup leading failed!\n");
55c11941 2622 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2623 }
523224a3 2624
ad5afc89
AE
2625 /* set up the rest of the queues */
2626 for_each_nondefault_eth_queue(bp, i) {
2627 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2628 if (rc) {
2629 BNX2X_ERR("Queue setup failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2631 }
2632 }
2633
2634 /* setup rss */
2635 rc = bnx2x_init_rss_pf(bp);
2636 if (rc) {
2637 BNX2X_ERR("PF RSS init failed\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
8d9ac297
AE
2640
2641 } else { /* vf */
2642 for_each_eth_queue(bp, i) {
2643 rc = bnx2x_vfpf_setup_q(bp, i);
2644 if (rc) {
2645 BNX2X_ERR("Queue setup failed\n");
2646 LOAD_ERROR_EXIT(bp, load_error3);
2647 }
2648 }
51c1a580 2649 }
619c5cb6 2650
523224a3
DK
 2651 /* Now that clients are configured we are ready to work */
2652 bp->state = BNX2X_STATE_OPEN;
2653
619c5cb6 2654 /* Configure a ucast MAC */
ad5afc89
AE
2655 if (IS_PF(bp))
2656 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297
AE
2657 else /* vf */
2658 rc = bnx2x_vfpf_set_mac(bp);
51c1a580
MS
2659 if (rc) {
2660 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2661 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2662 }
6e30dd4e 2663
ad5afc89 2664 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2665 bnx2x_update_max_mf_config(bp, bp->pending_max);
2666 bp->pending_max = 0;
2667 }
2668
ad5afc89
AE
2669 if (bp->port.pmf) {
2670 rc = bnx2x_initial_phy_init(bp, load_mode);
2671 if (rc)
2672 LOAD_ERROR_EXIT(bp, load_error3);
2673 }
c63da990 2674 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2675
619c5cb6
VZ
2676 /* Start fast path */
2677
2678 /* Initialize Rx filter. */
2679 netif_addr_lock_bh(bp->dev);
6e30dd4e 2680 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2681 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2682
619c5cb6 2683 /* Start the Tx */
9f6c9258
DK
2684 switch (load_mode) {
2685 case LOAD_NORMAL:
523224a3
DK
 2686 /* Tx queues should only be re-enabled */
2687 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2688 break;
2689
2690 case LOAD_OPEN:
2691 netif_tx_start_all_queues(bp->dev);
523224a3 2692 smp_mb__after_clear_bit();
9f6c9258
DK
2693 break;
2694
2695 case LOAD_DIAG:
8970b2e4 2696 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2697 bp->state = BNX2X_STATE_DIAG;
2698 break;
2699
2700 default:
2701 break;
2702 }
2703
00253a8c 2704 if (bp->port.pmf)
4c704899 2705 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2706 else
9f6c9258
DK
2707 bnx2x__link_status_update(bp);
2708
2709 /* start the timer */
2710 mod_timer(&bp->timer, jiffies + bp->current_interval);
2711
55c11941
MS
2712 if (CNIC_ENABLED(bp))
2713 bnx2x_load_cnic(bp);
9f6c9258 2714
ad5afc89
AE
2715 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2716 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2717 u32 val;
2718 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2719 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2720 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2721 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2722 }
2723
619c5cb6 2724 /* Wait for all pending SP commands to complete */
ad5afc89 2725 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2726 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2727 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2728 return -EBUSY;
2729 }
6891dd25 2730
9876879f
BW
2731 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2732 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2733 bnx2x_dcbx_init(bp, false);
2734
55c11941
MS
2735 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2736
9f6c9258
DK
2737 return 0;
2738
619c5cb6 2739#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2740load_error3:
ad5afc89
AE
2741 if (IS_PF(bp)) {
2742 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2743
ad5afc89
AE
2744 /* Clean queueable objects */
2745 bnx2x_squeeze_objects(bp);
2746 }
619c5cb6 2747
9f6c9258
DK
2748 /* Free SKBs, SGEs, TPA pool and driver internals */
2749 bnx2x_free_skbs(bp);
ec6ba945 2750 for_each_rx_queue(bp, i)
9f6c9258 2751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2752
9f6c9258 2753 /* Release IRQs */
d6214d7a
DK
2754 bnx2x_free_irq(bp);
2755load_error2:
ad5afc89 2756 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2759 }
2760
2761 bp->port.pmf = 0;
9f6c9258
DK
2762load_error1:
2763 bnx2x_napi_disable(bp);
722c6f58 2764 bnx2x_del_all_napi(bp);
ad5afc89 2765
889b9af3 2766 /* clear pf_load status, as it was already set */
ad5afc89
AE
2767 if (IS_PF(bp))
2768 bnx2x_clear_pf_load(bp);
d6214d7a 2769load_error0:
ad5afc89
AE
2770 bnx2x_free_fp_mem(bp);
2771 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2772 bnx2x_free_mem(bp);
2773
2774 return rc;
619c5cb6 2775#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2776}
2777
7fa6f340 2778int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2779{
2780 u8 rc = 0, cos, i;
2781
2782 /* Wait until tx fastpath tasks complete */
2783 for_each_tx_queue(bp, i) {
2784 struct bnx2x_fastpath *fp = &bp->fp[i];
2785
2786 for_each_cos_in_tx_queue(fp, cos)
2787 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2788 if (rc)
2789 return rc;
2790 }
2791 return 0;
2792}
2793
9f6c9258 2794/* must be called with rtnl_lock */
5d07d868 2795int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2796{
2797 int i;
c9ee9206
VZ
2798 bool global = false;
2799
55c11941
MS
2800 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2801
9ce392d4 2802 /* mark driver is unloaded in shmem2 */
ad5afc89 2803 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2804 u32 val;
2805 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2806 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2807 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2808 }
2809
80bfe5cc 2810 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2811 (bp->state == BNX2X_STATE_CLOSED ||
2812 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2813 /* We can get here if the driver has been unloaded
2814 * during parity error recovery and is either waiting for a
2815 * leader to complete or for other functions to unload and
2816 * then ifdown has been issued. In this case we want to
2817 * unload and let other functions to complete a recovery
2818 * process.
2819 */
9f6c9258
DK
2820 bp->recovery_state = BNX2X_RECOVERY_DONE;
2821 bp->is_leader = 0;
c9ee9206
VZ
2822 bnx2x_release_leader_lock(bp);
2823 smp_mb();
2824
51c1a580
MS
2825 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2826 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2827 return -EINVAL;
2828 }
2829
80bfe5cc
YM
 2830 /* Nothing to do during unload if a previous bnx2x_nic_load()
 2831 * has not completed successfully - all resources are released.
 2832 *
 2833 * We can get here only after an unsuccessful ndo_* callback, during which
2834 * dev->IFF_UP flag is still on.
2835 */
2836 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2837 return 0;
2838
 2839 /* It's important to set the bp->state to a value different from
87b7ba3d
VZ
2840 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2841 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2842 */
2843 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2844 smp_mb();
2845
55c11941
MS
2846 if (CNIC_LOADED(bp))
2847 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2848
9505ee37
VZ
2849 /* Stop Tx */
2850 bnx2x_tx_disable(bp);
65565884 2851 netdev_reset_tc(bp->dev);
9505ee37 2852
9f6c9258 2853 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2854
9f6c9258 2855 del_timer_sync(&bp->timer);
f85582f8 2856
ad5afc89
AE
2857 if (IS_PF(bp)) {
2858 /* Set ALWAYS_ALIVE bit in shmem */
2859 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2860 bnx2x_drv_pulse(bp);
2861 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2862 bnx2x_save_statistics(bp);
2863 }
9f6c9258 2864
ad5afc89
AE
2865 /* wait till consumers catch up with producers in all queues */
2866 bnx2x_drain_tx_queues(bp);
9f6c9258 2867
9b176b6b
AE
2868 /* if VF indicate to PF this function is going down (PF will delete sp
2869 * elements and clear initializations
2870 */
2871 if (IS_VF(bp))
2872 bnx2x_vfpf_close_vf(bp);
2873 else if (unload_mode != UNLOAD_RECOVERY)
 2874 /* if this is a normal/close unload need to clean up the chip */
5d07d868 2875 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2876 else {
c9ee9206
VZ
2877 /* Send the UNLOAD_REQUEST to the MCP */
2878 bnx2x_send_unload_req(bp, unload_mode);
2879
2880 /*
2881 * Prevent transactions to host from the functions on the
2882 * engine that doesn't reset global blocks in case of global
 2883 * attention once global blocks are reset and gates are opened
 2884 * (the engine whose leader will perform the recovery
2885 * last).
2886 */
2887 if (!CHIP_IS_E1x(bp))
2888 bnx2x_pf_disable(bp);
2889
2890 /* Disable HW interrupts, NAPI */
523224a3 2891 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2892 /* Delete all NAPI objects */
2893 bnx2x_del_all_napi(bp);
55c11941
MS
2894 if (CNIC_LOADED(bp))
2895 bnx2x_del_all_napi_cnic(bp);
523224a3 2896 /* Release IRQs */
d6214d7a 2897 bnx2x_free_irq(bp);
c9ee9206
VZ
2898
2899 /* Report UNLOAD_DONE to MCP */
5d07d868 2900 bnx2x_send_unload_done(bp, false);
523224a3 2901 }
9f6c9258 2902
619c5cb6
VZ
2903 /*
 2904 * At this stage no more interrupts will arrive so we may safely clean
2905 * the queueable objects here in case they failed to get cleaned so far.
2906 */
ad5afc89
AE
2907 if (IS_PF(bp))
2908 bnx2x_squeeze_objects(bp);
619c5cb6 2909
79616895
VZ
2910 /* There should be no more pending SP commands at this stage */
2911 bp->sp_state = 0;
2912
9f6c9258
DK
2913 bp->port.pmf = 0;
2914
2915 /* Free SKBs, SGEs, TPA pool and driver internals */
2916 bnx2x_free_skbs(bp);
55c11941
MS
2917 if (CNIC_LOADED(bp))
2918 bnx2x_free_skbs_cnic(bp);
ec6ba945 2919 for_each_rx_queue(bp, i)
9f6c9258 2920 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2921
ad5afc89
AE
2922 bnx2x_free_fp_mem(bp);
2923 if (CNIC_LOADED(bp))
55c11941 2924 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2925
ad5afc89
AE
2926 if (IS_PF(bp)) {
2927 bnx2x_free_mem(bp);
2928 if (CNIC_LOADED(bp))
2929 bnx2x_free_mem_cnic(bp);
2930 }
9f6c9258 2931 bp->state = BNX2X_STATE_CLOSED;
55c11941 2932 bp->cnic_loaded = false;
9f6c9258 2933
c9ee9206
VZ
2934 /* Check if there are pending parity attentions. If there are - set
2935 * RECOVERY_IN_PROGRESS.
2936 */
ad5afc89 2937 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2938 bnx2x_set_reset_in_progress(bp);
2939
2940 /* Set RESET_IS_GLOBAL if needed */
2941 if (global)
2942 bnx2x_set_reset_global(bp);
2943 }
2944
2945
9f6c9258
DK
2946 /* The last driver must disable a "close the gate" if there is no
2947 * parity attention or "process kill" pending.
2948 */
ad5afc89
AE
2949 if (IS_PF(bp) &&
2950 !bnx2x_clear_pf_load(bp) &&
2951 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2952 bnx2x_disable_close_the_gate(bp);
2953
55c11941
MS
2954 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2955
9f6c9258
DK
2956 return 0;
2957}
f85582f8 2958
9f6c9258
DK
2959int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2960{
2961 u16 pmcsr;
2962
adf5f6a1
DK
2963 /* If there is no power capability, silently succeed */
2964 if (!bp->pm_cap) {
51c1a580 2965 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2966 return 0;
2967 }
2968
9f6c9258
DK
2969 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2970
2971 switch (state) {
2972 case PCI_D0:
2973 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2974 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2975 PCI_PM_CTRL_PME_STATUS));
2976
2977 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2978 /* delay required during transition out of D3hot */
2979 msleep(20);
2980 break;
2981
2982 case PCI_D3hot:
 2983 /* If there are other clients above, don't
 2984 shut down the power */
2985 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2986 return 0;
2987 /* Don't shut down the power for emulation and FPGA */
2988 if (CHIP_REV_IS_SLOW(bp))
2989 return 0;
2990
2991 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2992 pmcsr |= 3;
2993
2994 if (bp->wol)
2995 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2996
2997 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2998 pmcsr);
2999
3000 /* No more memory access after this point until
3001 * device is brought back to D0.
3002 */
3003 break;
3004
3005 default:
51c1a580 3006 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3007 return -EINVAL;
3008 }
3009 return 0;
3010}
3011
9f6c9258
DK
3012/*
3013 * net_device service functions
3014 */
d6214d7a 3015int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3016{
3017 int work_done = 0;
6383c0b3 3018 u8 cos;
9f6c9258
DK
3019 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3020 napi);
3021 struct bnx2x *bp = fp->bp;
3022
3023 while (1) {
3024#ifdef BNX2X_STOP_ON_ERROR
3025 if (unlikely(bp->panic)) {
3026 napi_complete(napi);
3027 return 0;
3028 }
3029#endif
3030
6383c0b3 3031 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3032 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3033 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3034
9f6c9258
DK
3035 if (bnx2x_has_rx_work(fp)) {
3036 work_done += bnx2x_rx_int(fp, budget - work_done);
3037
3038 /* must not complete if we consumed full budget */
3039 if (work_done >= budget)
3040 break;
3041 }
3042
3043 /* Fall out from the NAPI loop if needed */
3044 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3045
ec6ba945
VZ
3046 /* No need to update SB for FCoE L2 ring as long as
3047 * it's connected to the default SB and the SB
3048 * has been updated when NAPI was scheduled.
3049 */
3050 if (IS_FCOE_FP(fp)) {
3051 napi_complete(napi);
3052 break;
3053 }
9f6c9258 3054 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3055 /* bnx2x_has_rx_work() reads the status block,
3056 * thus we need to ensure that status block indices
3057 * have been actually read (bnx2x_update_fpsb_idx)
3058 * prior to this check (bnx2x_has_rx_work) so that
3059 * we won't write the "newer" value of the status block
3060 * to IGU (if there was a DMA right after
3061 * bnx2x_has_rx_work and if there is no rmb, the memory
3062 * reading (bnx2x_update_fpsb_idx) may be postponed
3063 * to right before bnx2x_ack_sb). In this case there
3064 * will never be another interrupt until there is
3065 * another update of the status block, while there
3066 * is still unhandled work.
3067 */
9f6c9258
DK
3068 rmb();
3069
3070 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3071 napi_complete(napi);
3072 /* Re-enable interrupts */
51c1a580 3073 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3074 "Update index to %d\n", fp->fp_hc_idx);
3075 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3076 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3077 IGU_INT_ENABLE, 1);
3078 break;
3079 }
3080 }
3081 }
3082
3083 return work_done;
3084}
3085
9f6c9258
DK
3086/* we split the first BD into headers and data BDs
3087 * to ease the pain of our fellow microcode engineers
3088 * we use one mapping for both BDs
9f6c9258 3089 */
91226790
DK
3090static u16 bnx2x_tx_split(struct bnx2x *bp,
3091 struct bnx2x_fp_txdata *txdata,
3092 struct sw_tx_bd *tx_buf,
3093 struct eth_tx_start_bd **tx_bd, u16 hlen,
3094 u16 bd_prod)
9f6c9258
DK
3095{
3096 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3097 struct eth_tx_bd *d_tx_bd;
3098 dma_addr_t mapping;
3099 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3100
3101 /* first fix first BD */
9f6c9258
DK
3102 h_tx_bd->nbytes = cpu_to_le16(hlen);
3103
91226790
DK
3104 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3105 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3106
3107 /* now get a new data BD
3108 * (after the pbd) and fill it */
3109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3110 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3111
3112 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3113 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3114
3115 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3116 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3117 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3118
3119 /* this marks the BD as one that has no individual mapping */
3120 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3121
3122 DP(NETIF_MSG_TX_QUEUED,
3123 "TSO split data size is %d (%x:%x)\n",
3124 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3125
3126 /* update tx_bd */
3127 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3128
3129 return bd_prod;
3130}
3131
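/* Illustration of the split done above: the original start BD keeps only the
 * hlen header bytes, while the new data BD reuses the same DMA mapping at
 * offset hlen for the remaining old_len - hlen bytes; BNX2X_TSO_SPLIT_BD
 * marks it as a BD with no mapping of its own, so the completion path does
 * not try to unmap it separately.
 */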
86564c3f
YM
3132#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3133#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3134static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3135{
86564c3f
YM
3136 __sum16 tsum = (__force __sum16) csum;
3137
9f6c9258 3138 if (fix > 0)
86564c3f
YM
3139 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3140 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3141
3142 else if (fix < 0)
86564c3f
YM
3143 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3144 csum_partial(t_header, -fix, 0)));
9f6c9258 3145
e2593fcd 3146 return bswab16(tsum);
9f6c9258
DK
3147}
3148
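/* Illustration of the fixup above: a positive 'fix' means 'fix' extra bytes
 * in front of the transport header were included in the partial checksum,
 * so their sum is removed with csum_sub(); a negative 'fix' means that many
 * bytes were missed and csum_add() folds them back in; either way the
 * result is byte-swapped into the form the parsing BD expects.
 */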
91226790 3149static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3150{
3151 u32 rc;
a848ade4
DK
3152 __u8 prot = 0;
3153 __be16 protocol;
9f6c9258
DK
3154
3155 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3156 return XMIT_PLAIN;
9f6c9258 3157
a848ade4
DK
3158 protocol = vlan_get_protocol(skb);
3159 if (protocol == htons(ETH_P_IPV6)) {
3160 rc = XMIT_CSUM_V6;
3161 prot = ipv6_hdr(skb)->nexthdr;
3162 } else {
3163 rc = XMIT_CSUM_V4;
3164 prot = ip_hdr(skb)->protocol;
3165 }
9f6c9258 3166
a848ade4
DK
3167 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3168 if (inner_ip_hdr(skb)->version == 6) {
3169 rc |= XMIT_CSUM_ENC_V6;
3170 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3171 rc |= XMIT_CSUM_TCP;
9f6c9258 3172 } else {
a848ade4
DK
3173 rc |= XMIT_CSUM_ENC_V4;
3174 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3175 rc |= XMIT_CSUM_TCP;
3176 }
3177 }
a848ade4
DK
3178 if (prot == IPPROTO_TCP)
3179 rc |= XMIT_CSUM_TCP;
9f6c9258 3180
a848ade4
DK
3181 if (skb_is_gso_v6(skb)) {
3182 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3183 if (rc & XMIT_CSUM_ENC)
3184 rc |= XMIT_GSO_ENC_V6;
3185 } else if (skb_is_gso(skb)) {
3186 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3187 if (rc & XMIT_CSUM_ENC)
3188 rc |= XMIT_GSO_ENC_V4;
3189 }
9f6c9258
DK
3190
3191 return rc;
3192}
3193
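/* Worked example (illustration): a plain TSO TCP/IPv4 packet with
 * CHECKSUM_PARTIAL comes out of bnx2x_xmit_type() as
 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 * whereas the same flow carried inside a tunnel on a non-E1x device
 * additionally picks up XMIT_CSUM_ENC_V4 (or _V6) and XMIT_GSO_ENC_V4 for
 * the inner headers.
 */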
3194#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3195/* check if packet requires linearization (packet is too fragmented)
3196 no need to check fragmentation if page size > 8K (there will be no
 3197 violation of FW restrictions) */
3198static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3199 u32 xmit_type)
3200{
3201 int to_copy = 0;
3202 int hlen = 0;
3203 int first_bd_sz = 0;
3204
3205 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3206 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3207
3208 if (xmit_type & XMIT_GSO) {
3209 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3210 /* Check if LSO packet needs to be copied:
3211 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3212 int wnd_size = MAX_FETCH_BD - 3;
3213 /* Number of windows to check */
3214 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3215 int wnd_idx = 0;
3216 int frag_idx = 0;
3217 u32 wnd_sum = 0;
3218
3219 /* Headers length */
3220 hlen = (int)(skb_transport_header(skb) - skb->data) +
3221 tcp_hdrlen(skb);
3222
 3223 /* Amount of data (w/o headers) on linear part of SKB */
3224 first_bd_sz = skb_headlen(skb) - hlen;
3225
3226 wnd_sum = first_bd_sz;
3227
3228 /* Calculate the first sum - it's special */
3229 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3230 wnd_sum +=
9e903e08 3231 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3232
3233 /* If there was data on linear skb data - check it */
3234 if (first_bd_sz > 0) {
3235 if (unlikely(wnd_sum < lso_mss)) {
3236 to_copy = 1;
3237 goto exit_lbl;
3238 }
3239
3240 wnd_sum -= first_bd_sz;
3241 }
3242
3243 /* Others are easier: run through the frag list and
3244 check all windows */
3245 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3246 wnd_sum +=
9e903e08 3247 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3248
3249 if (unlikely(wnd_sum < lso_mss)) {
3250 to_copy = 1;
3251 break;
3252 }
3253 wnd_sum -=
9e903e08 3254 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3255 }
3256 } else {
 3257 /* in the non-LSO case a too fragmented packet should always
 3258 be linearized */
3259 to_copy = 1;
3260 }
3261 }
3262
3263exit_lbl:
3264 if (unlikely(to_copy))
3265 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3266 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3267 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3268 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3269
3270 return to_copy;
3271}
3272#endif
3273
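/* Worked example (assumed values): if MAX_FETCH_BD is 13 the sliding window
 * above covers 10 fragments. A GSO skb is left alone as long as every such
 * window of consecutive fragments (plus the linear part for the first one)
 * sums to at least gso_size; if any window falls short, or the packet is a
 * heavily fragmented non-LSO skb, it is linearized before transmission.
 */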
91226790
DK
3274static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3275 u32 xmit_type)
f2e0899f 3276{
a848ade4
DK
3277 struct ipv6hdr *ipv6;
3278
2297a2da
VZ
3279 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3280 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3281 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3282
3283 if (xmit_type & XMIT_GSO_ENC_V6)
3284 ipv6 = inner_ipv6_hdr(skb);
3285 else if (xmit_type & XMIT_GSO_V6)
3286 ipv6 = ipv6_hdr(skb);
3287 else
3288 ipv6 = NULL;
3289
3290 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3291 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3292}
3293
3294/**
e8920674 3295 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3296 *
e8920674
DK
3297 * @skb: packet skb
3298 * @pbd: parse BD
3299 * @xmit_type: xmit flags
f2e0899f 3300 */
91226790
DK
3301static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3302 struct eth_tx_parse_bd_e1x *pbd,
3303 u32 xmit_type)
f2e0899f
DK
3304{
3305 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3306 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3307 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3308
3309 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3310 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3311 pbd->tcp_pseudo_csum =
86564c3f
YM
3312 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3313 ip_hdr(skb)->daddr,
3314 0, IPPROTO_TCP, 0));
f2e0899f
DK
3315
3316 } else
3317 pbd->tcp_pseudo_csum =
86564c3f
YM
3318 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3319 &ipv6_hdr(skb)->daddr,
3320 0, IPPROTO_TCP, 0));
f2e0899f 3321
86564c3f
YM
3322 pbd->global_data |=
3323 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3324}
f85582f8 3325
a848ade4
DK
3326/**
3327 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3328 *
3329 * @bp: driver handle
3330 * @skb: packet skb
3331 * @parsing_data: data to be updated
3332 * @xmit_type: xmit flags
3333 *
3334 * 57712/578xx related, when skb has encapsulation
3335 */
3336static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3337 u32 *parsing_data, u32 xmit_type)
3338{
3339 *parsing_data |=
3340 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3341 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3342 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3343
3344 if (xmit_type & XMIT_CSUM_TCP) {
3345 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3346 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3347 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3348
3349 return skb_inner_transport_header(skb) +
3350 inner_tcp_hdrlen(skb) - skb->data;
3351 }
3352
3353 /* We support checksum offload for TCP and UDP only.
3354 * No need to pass the UDP header length - it's a constant.
3355 */
3356 return skb_inner_transport_header(skb) +
3357 sizeof(struct udphdr) - skb->data;
3358}
3359
f2e0899f 3360/**
e8920674 3361 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3362 *
e8920674
DK
3363 * @bp: driver handle
3364 * @skb: packet skb
3365 * @parsing_data: data to be updated
3366 * @xmit_type: xmit flags
f2e0899f 3367 *
91226790 3368 * 57712/578xx related
f2e0899f 3369 */
91226790
DK
3370static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3371 u32 *parsing_data, u32 xmit_type)
f2e0899f 3372{
e39aece7 3373 *parsing_data |=
2de67439 3374 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3375 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3376 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3377
e39aece7
VZ
3378 if (xmit_type & XMIT_CSUM_TCP) {
3379 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3380 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3381 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3382
e39aece7 3383 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3384 }
3385 /* We support checksum offload for TCP and UDP only.
3386 * No need to pass the UDP header length - it's a constant.
3387 */
3388 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3389}
3390
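/* Worked example (illustration): for an untagged TCP/IPv4 frame with a
 * 14-byte Ethernet header and a 20-byte IP header the L4 header starts at
 * byte 34, i.e. 17 words, which is what the offset field above encodes;
 * a TCP header without options then contributes tcp_hdrlen/4 = 5 dwords.
 */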
a848ade4 3391/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3392static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3393 struct eth_tx_start_bd *tx_start_bd,
3394 u32 xmit_type)
93ef5c02 3395{
93ef5c02
DK
3396 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3397
a848ade4 3398 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3399 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3400
3401 if (!(xmit_type & XMIT_CSUM_TCP))
3402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3403}
3404
f2e0899f 3405/**
e8920674 3406 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3407 *
e8920674
DK
3408 * @bp: driver handle
3409 * @skb: packet skb
3410 * @pbd: parse BD to be updated
3411 * @xmit_type: xmit flags
f2e0899f 3412 */
91226790
DK
3413static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3414 struct eth_tx_parse_bd_e1x *pbd,
3415 u32 xmit_type)
f2e0899f 3416{
e39aece7 3417 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3418
3419 /* for now NS flag is not used in Linux */
3420 pbd->global_data =
86564c3f
YM
3421 cpu_to_le16(hlen |
3422 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3423 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3424
3425 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3426 skb_network_header(skb)) >> 1;
f2e0899f 3427
e39aece7
VZ
3428 hlen += pbd->ip_hlen_w;
3429
3430 /* We support checksum offload for TCP and UDP only */
3431 if (xmit_type & XMIT_CSUM_TCP)
3432 hlen += tcp_hdrlen(skb) / 2;
3433 else
3434 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3435
3436 pbd->total_hlen_w = cpu_to_le16(hlen);
3437 hlen = hlen*2;
3438
3439 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3440 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3441
3442 } else {
3443 s8 fix = SKB_CS_OFF(skb); /* signed! */
3444
3445 DP(NETIF_MSG_TX_QUEUED,
3446 "hlen %d fix %d csum before fix %x\n",
3447 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3448
3449 /* HW bug: fixup the CSUM */
3450 pbd->tcp_pseudo_csum =
3451 bnx2x_csum_fix(skb_transport_header(skb),
3452 SKB_CS(skb), fix);
3453
3454 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3455 pbd->tcp_pseudo_csum);
3456 }
3457
3458 return hlen;
3459}
f85582f8 3460
a848ade4
DK
3461static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3462 struct eth_tx_parse_bd_e2 *pbd_e2,
3463 struct eth_tx_parse_2nd_bd *pbd2,
3464 u16 *global_data,
3465 u32 xmit_type)
3466{
e287a75c 3467 u16 hlen_w = 0;
a848ade4 3468 u8 outerip_off, outerip_len = 0;
e287a75c
DK
3469 /* from outer IP to transport */
3470 hlen_w = (skb_inner_transport_header(skb) -
3471 skb_network_header(skb)) >> 1;
a848ade4
DK
3472
3473 /* transport len */
3474 if (xmit_type & XMIT_CSUM_TCP)
e287a75c 3475 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3476 else
e287a75c 3477 hlen_w += sizeof(struct udphdr) >> 1;
a848ade4 3478
e287a75c 3479 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4
DK
3480
3481 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3482 struct iphdr *iph = ip_hdr(skb);
a848ade4
DK
3483 pbd2->fw_ip_csum_wo_len_flags_frag =
3484 bswab16(csum_fold((~iph->check) -
3485 iph->tot_len - iph->frag_off));
3486 } else {
3487 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3488 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3489 }
3490
3491 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3492
3493 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3494
3495 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3496 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3497
3498 pbd_e2->data.tunnel_data.pseudo_csum =
3499 bswab16(~csum_tcpudp_magic(
3500 inner_ip_hdr(skb)->saddr,
3501 inner_ip_hdr(skb)->daddr,
3502 0, IPPROTO_TCP, 0));
3503
3504 outerip_len = ip_hdr(skb)->ihl << 1;
3505 } else {
3506 pbd_e2->data.tunnel_data.pseudo_csum =
3507 bswab16(~csum_ipv6_magic(
3508 &inner_ipv6_hdr(skb)->saddr,
3509 &inner_ipv6_hdr(skb)->daddr,
3510 0, IPPROTO_TCP, 0));
3511 }
3512
3513 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3514
3515 *global_data |=
3516 outerip_off |
3517 (!!(xmit_type & XMIT_CSUM_V6) <<
3518 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3519 (outerip_len <<
3520 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3521 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3522 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3523}
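For the tunneled TSO case above, the driver seeds the tunnel pseudo-checksum from the inner addresses and IPPROTO_TCP with a zero length, presumably so the FW can add the per-segment length later. If I read the checksum helpers correctly, csum_tcpudp_magic() returns the complemented fold and the outer ~ cancels it, so the value placed in the BD is the plain folded pseudo-header sum; the standalone sketch below computes that folded sum directly (fold16(), tso_pseudo_seed() and the addresses are illustrative, not driver symbols).

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator down to 16 bits (end-around carry). */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Folded IPv4 pseudo-header sum with a zero length field, i.e. the
 * kind of seed handed to the FW for TSO. */
static uint16_t tso_pseudo_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;			/* length contribution is 0 */
	return fold16(sum);
}

int main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP); example addresses */
	printf("seed = 0x%04x\n", tso_pseudo_seed(0xc0000201, 0xc0000202, 6));
	return 0;
}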
3524
9f6c9258
DK
3525/* called with netif_tx_lock
3526 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3527 * netif_wake_queue()
3528 */
3529netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3530{
3531 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3532
9f6c9258 3533 struct netdev_queue *txq;
6383c0b3 3534 struct bnx2x_fp_txdata *txdata;
9f6c9258 3535 struct sw_tx_bd *tx_buf;
619c5cb6 3536 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3537 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3538 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3539 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3540 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3541 u32 pbd_e2_parsing_data = 0;
9f6c9258 3542 u16 pkt_prod, bd_prod;
65565884 3543 int nbd, txq_index;
9f6c9258
DK
3544 dma_addr_t mapping;
3545 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3546 int i;
3547 u8 hlen = 0;
3548 __le16 pkt_size = 0;
3549 struct ethhdr *eth;
3550 u8 mac_type = UNICAST_ADDRESS;
3551
3552#ifdef BNX2X_STOP_ON_ERROR
3553 if (unlikely(bp->panic))
3554 return NETDEV_TX_BUSY;
3555#endif
3556
6383c0b3
AE
3557 txq_index = skb_get_queue_mapping(skb);
3558 txq = netdev_get_tx_queue(dev, txq_index);
3559
55c11941 3560 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3561
65565884 3562 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3563
3564 /* enable this debug print to view the transmission queue being used
51c1a580 3565 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3566 txq_index, fp_index, txdata_index); */
9f6c9258 3567
6383c0b3 3568 /* enable this debug print to view the transmission details
51c1a580
MS
3569 DP(NETIF_MSG_TX_QUEUED,
3570 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3571 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3572
6383c0b3 3573 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3574 skb_shinfo(skb)->nr_frags +
3575 BDS_PER_TX_PKT +
3576 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3577 /* Handle special storage cases separately */
c96bdc0c
DK
3578 if (txdata->tx_ring_size == 0) {
3579 struct bnx2x_eth_q_stats *q_stats =
3580 bnx2x_fp_qstats(bp, txdata->parent_fp);
3581 q_stats->driver_filtered_tx_pkt++;
3582 dev_kfree_skb(skb);
3583 return NETDEV_TX_OK;
3584 }
2de67439
YM
3585 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3586 netif_tx_stop_queue(txq);
c96bdc0c 3587 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3588
9f6c9258
DK
3589 return NETDEV_TX_BUSY;
3590 }
3591
51c1a580 3592 DP(NETIF_MSG_TX_QUEUED,
04c46736 3593 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3594 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3595 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3596 skb->len);
9f6c9258
DK
3597
3598 eth = (struct ethhdr *)skb->data;
3599
3600 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3601 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3602 if (is_broadcast_ether_addr(eth->h_dest))
3603 mac_type = BROADCAST_ADDRESS;
3604 else
3605 mac_type = MULTICAST_ADDRESS;
3606 }
3607
91226790 3608#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3609 /* First, check if we need to linearize the skb (due to FW
3610 restrictions). No need to check fragmentation if page size > 8K
3611 (there will be no violation of FW restrictions) */
3612 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3613 /* Statistics of linearization */
3614 bp->lin_cnt++;
3615 if (skb_linearize(skb) != 0) {
51c1a580
MS
3616 DP(NETIF_MSG_TX_QUEUED,
3617 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3618 dev_kfree_skb_any(skb);
3619 return NETDEV_TX_OK;
3620 }
3621 }
3622#endif
619c5cb6
VZ
3623 /* Map skb linear data for DMA */
3624 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3625 skb_headlen(skb), DMA_TO_DEVICE);
3626 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3627 DP(NETIF_MSG_TX_QUEUED,
3628 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3629 dev_kfree_skb_any(skb);
3630 return NETDEV_TX_OK;
3631 }
9f6c9258
DK
3632 /*
3633 Please read carefully. First we use one BD which we mark as start,
3634 then we have a parsing info BD (used for TSO or xsum),
3635 and only then we have the rest of the TSO BDs.
3636 (don't forget to mark the last one as last,
3637 and to unmap only AFTER you write to the BD ...)
3638 And above all, all PBD sizes are in words - NOT DWORDS!
3639 */
3640
619c5cb6
VZ
3641 /* get the current pkt producer now - advance it just before sending the packet
3642 * since mapping of pages may fail and cause the packet to be dropped
3643 */
6383c0b3
AE
3644 pkt_prod = txdata->tx_pkt_prod;
3645 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3646
619c5cb6
VZ
3647 /* get a tx_buf and first BD
3648 * tx_start_bd may be changed during SPLIT,
3649 * but first_bd will always stay first
3650 */
6383c0b3
AE
3651 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3652 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3653 first_bd = tx_start_bd;
9f6c9258
DK
3654
3655 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3656
91226790
DK
3657 /* header nbd: indirectly zero other flags! */
3658 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3659
3660 /* remember the first BD of the packet */
6383c0b3 3661 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3662 tx_buf->skb = skb;
3663 tx_buf->flags = 0;
3664
3665 DP(NETIF_MSG_TX_QUEUED,
3666 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3667 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3668
eab6d18d 3669 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3670 tx_start_bd->vlan_or_ethertype =
3671 cpu_to_le16(vlan_tx_tag_get(skb));
3672 tx_start_bd->bd_flags.as_bitfield |=
3673 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3674 } else {
3675 /* when transmitting from a VF, the start BD must hold the ethertype
3676 * so the FW can enforce it
3677 */
91226790 3678 if (IS_VF(bp))
dc1ba591
AE
3679 tx_start_bd->vlan_or_ethertype =
3680 cpu_to_le16(ntohs(eth->h_proto));
91226790 3681 else
dc1ba591
AE
3682 /* used by FW for packet accounting */
3683 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3684 }
9f6c9258 3685
91226790
DK
3686 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3687
9f6c9258
DK
3688 /* turn on parsing and get a BD */
3689 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3690
93ef5c02
DK
3691 if (xmit_type & XMIT_CSUM)
3692 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3693
619c5cb6 3694 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3695 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3696 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3697
3698 if (xmit_type & XMIT_CSUM_ENC) {
3699 u16 global_data = 0;
3700
3701 /* Set PBD in enc checksum offload case */
3702 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3703 &pbd_e2_parsing_data,
3704 xmit_type);
3705
3706 /* turn on 2nd parsing and get a BD */
3707 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3708
3709 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3710
3711 memset(pbd2, 0, sizeof(*pbd2));
3712
3713 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3714 (skb_inner_network_header(skb) -
3715 skb->data) >> 1;
3716
3717 if (xmit_type & XMIT_GSO_ENC)
3718 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3719 &global_data,
3720 xmit_type);
3721
3722 pbd2->global_data = cpu_to_le16(global_data);
3723
3724 /* add an additional parsing BD indication to the start BD */
3725 SET_FLAG(tx_start_bd->general_data,
3726 ETH_TX_START_BD_PARSE_NBDS, 1);
3727 /* set encapsulation flag in start BD */
3728 SET_FLAG(tx_start_bd->general_data,
3729 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3730 nbd++;
3731 } else if (xmit_type & XMIT_CSUM) {
91226790 3732 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3733 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3734 &pbd_e2_parsing_data,
3735 xmit_type);
a848ade4 3736 }
dc1ba591 3737
91226790
DK
3738 /* Add the MACs to the parsing BD if this is a VF */
3739 if (IS_VF(bp)) {
3740 /* override GRE parameters in BD */
3741 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3742 &pbd_e2->data.mac_addr.src_mid,
3743 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3744 eth->h_source);
91226790
DK
3745
3746 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3747 &pbd_e2->data.mac_addr.dst_mid,
3748 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3749 eth->h_dest);
3750 }
96bed4b9
YM
3751
3752 SET_FLAG(pbd_e2_parsing_data,
3753 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3754 } else {
96bed4b9 3755 u16 global_data = 0;
6383c0b3 3756 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3757 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3758 /* Set PBD in checksum offload case */
3759 if (xmit_type & XMIT_CSUM)
3760 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3761
96bed4b9
YM
3762 SET_FLAG(global_data,
3763 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3764 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3765 }
3766
f85582f8 3767 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3768 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3769 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3770 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3771 pkt_size = tx_start_bd->nbytes;
3772
51c1a580 3773 DP(NETIF_MSG_TX_QUEUED,
91226790 3774 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3775 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3776 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3777 tx_start_bd->bd_flags.as_bitfield,
3778 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3779
3780 if (xmit_type & XMIT_GSO) {
3781
3782 DP(NETIF_MSG_TX_QUEUED,
3783 "TSO packet len %d hlen %d total len %d tso size %d\n",
3784 skb->len, hlen, skb_headlen(skb),
3785 skb_shinfo(skb)->gso_size);
3786
3787 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3788
91226790
DK
3789 if (unlikely(skb_headlen(skb) > hlen)) {
3790 nbd++;
6383c0b3
AE
3791 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3792 &tx_start_bd, hlen,
91226790
DK
3793 bd_prod);
3794 }
619c5cb6 3795 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3796 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3797 xmit_type);
f2e0899f
DK
3798 else
3799 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3800 }
2297a2da
VZ
3801
3802 /* Set the PBD's parsing_data field if not zero
3803 * (for the chips newer than 57711).
3804 */
3805 if (pbd_e2_parsing_data)
3806 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3807
9f6c9258
DK
3808 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3809
f85582f8 3810 /* Handle fragmented skb */
9f6c9258
DK
3811 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3812 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3813
9e903e08
ED
3814 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3815 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3816 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3817 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3818
51c1a580
MS
3819 DP(NETIF_MSG_TX_QUEUED,
3820 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3821
3822 /* we need to unmap all buffers already mapped
3823 * for this SKB;
3824 * first_bd->nbd needs to be properly updated
3825 * before calling bnx2x_free_tx_pkt
3826 */
3827 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3828 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3829 TX_BD(txdata->tx_pkt_prod),
3830 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3831 return NETDEV_TX_OK;
3832 }
3833
9f6c9258 3834 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3835 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3836 if (total_pkt_bd == NULL)
6383c0b3 3837 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3838
9f6c9258
DK
3839 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3840 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3841 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3842 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3843 nbd++;
9f6c9258
DK
3844
3845 DP(NETIF_MSG_TX_QUEUED,
3846 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3847 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3848 le16_to_cpu(tx_data_bd->nbytes));
3849 }
3850
3851 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3852
619c5cb6
VZ
3853 /* update with actual num BDs */
3854 first_bd->nbd = cpu_to_le16(nbd);
3855
9f6c9258
DK
3856 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3857
3858 /* now send a tx doorbell, counting the next BD
3859 * if the packet contains or ends with it
3860 */
3861 if (TX_BD_POFF(bd_prod) < nbd)
3862 nbd++;
3863
619c5cb6
VZ
3864 /* total_pkt_bytes should be set on the first data BD if
3865 * it's not an LSO packet and there is more than one
3866 * data BD. In this case pkt_size is limited by an MTU value.
3867 * However, we prefer to set it for an LSO packet (while we don't
3868 * have to) in order to save some CPU cycles in the non-LSO
3869 * case, where we care about them much more.
3870 */
9f6c9258
DK
3871 if (total_pkt_bd != NULL)
3872 total_pkt_bd->total_pkt_bytes = pkt_size;
3873
523224a3 3874 if (pbd_e1x)
9f6c9258 3875 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3876 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3877 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3878 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3879 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3880 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3881 if (pbd_e2)
3882 DP(NETIF_MSG_TX_QUEUED,
3883 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3884 pbd_e2,
3885 pbd_e2->data.mac_addr.dst_hi,
3886 pbd_e2->data.mac_addr.dst_mid,
3887 pbd_e2->data.mac_addr.dst_lo,
3888 pbd_e2->data.mac_addr.src_hi,
3889 pbd_e2->data.mac_addr.src_mid,
3890 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3891 pbd_e2->parsing_data);
9f6c9258
DK
3892 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3893
2df1a70a
TH
3894 netdev_tx_sent_queue(txq, skb->len);
3895
8373c57d
WB
3896 skb_tx_timestamp(skb);
3897
6383c0b3 3898 txdata->tx_pkt_prod++;
9f6c9258
DK
3899 /*
3900 * Make sure that the BD data is updated before updating the producer
3901 * since FW might read the BD right after the producer is updated.
3902 * This is only applicable for weak-ordered memory model archs such
3903 * as IA-64. The following barrier is also mandatory since the FW
3904 * assumes all packets must have BDs.
3905 */
3906 wmb();
3907
6383c0b3 3908 txdata->tx_db.data.prod += nbd;
9f6c9258 3909 barrier();
f85582f8 3910
6383c0b3 3911 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3912
3913 mmiowb();
3914
6383c0b3 3915 txdata->tx_bd_prod += nbd;
9f6c9258 3916
7df2dc6b 3917 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3918 netif_tx_stop_queue(txq);
3919
3920 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3921 * ordering of set_bit() in netif_tx_stop_queue() and read of
3922 * fp->bd_tx_cons */
3923 smp_mb();
3924
15192a8c 3925 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3926 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3927 netif_tx_wake_queue(txq);
3928 }
6383c0b3 3929 txdata->tx_pkt++;
9f6c9258
DK
3930
3931 return NETDEV_TX_OK;
3932}
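To summarize the descriptor bookkeeping in bnx2x_start_xmit(): a packet consumes one start BD, one or two parsing BDs and one BD per page fragment, and the queue is stopped up front when fewer descriptors than a worst-case packet remain. A toy sketch of that accounting, with a made-up BDS_PER_TX_PKT standing in for the driver's worst-case constant:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's worst-case per-packet BD count. */
#define BDS_PER_TX_PKT 3

static bool ring_has_room(unsigned int bds_free, unsigned int nr_frags)
{
	/* stop the queue early if a worst-case packet might not fit */
	return bds_free >= nr_frags + BDS_PER_TX_PKT;
}

int main(void)
{
	unsigned int nr_frags = 2;
	unsigned int nbd = 2 + nr_frags;	/* start_bd + pbd + one BD per frag */

	printf("nbd = %u\n", nbd);
	printf("4 BDs free: %s\n", ring_has_room(4, nr_frags) ? "send" : "stop queue");
	printf("8 BDs free: %s\n", ring_has_room(8, nr_frags) ? "send" : "stop queue");
	return 0;
}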
f85582f8 3933
6383c0b3
AE
3934/**
3935 * bnx2x_setup_tc - routine to configure net_device for multi tc
3936 *
3937 * @dev: net device to configure
3938 * @num_tc: number of traffic classes to enable
3939 *
3940 * callback connected to the ndo_setup_tc function pointer
3941 */
3942int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3943{
3944 int cos, prio, count, offset;
3945 struct bnx2x *bp = netdev_priv(dev);
3946
3947 /* setup tc must be called under rtnl lock */
3948 ASSERT_RTNL();
3949
3950 /* no traffic classes requested - reset the mapping and return */
3951 if (!num_tc) {
3952 netdev_reset_tc(dev);
3953 return 0;
3954 }
3955
3956 /* requested to support too many traffic classes */
3957 if (num_tc > bp->max_cos) {
51c1a580
MS
3958 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3959 num_tc, bp->max_cos);
6383c0b3
AE
3960 return -EINVAL;
3961 }
3962
3963 /* declare the number of supported traffic classes */
3964 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3965 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3966 return -EINVAL;
3967 }
3968
3969 /* configure priority to traffic class mapping */
3970 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3971 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3972 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3973 "mapping priority %d to tc %d\n",
6383c0b3
AE
3974 prio, bp->prio_to_cos[prio]);
3975 }
3976
3977
3978 /* Use this configuration to differentiate tc0 from other COSes.
3979 This can be used for ETS or PFC, and saves the effort of setting
3980 up a multi-class queueing discipline or negotiating DCBX with a switch
3981 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3982 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3983 for (prio = 1; prio < 16; prio++) {
3984 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3985 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3986 } */
3987
3988 /* configure traffic class to transmission queue mapping */
3989 for (cos = 0; cos < bp->max_cos; cos++) {
3990 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3991 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3992 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3993 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3994 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3995 cos, offset, count);
3996 }
3997
3998 return 0;
3999}
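The traffic-class to queue mapping above gives each CoS a contiguous block of transmit queues: assuming the ethernet and non-CNIC queue counts coincide (as they do without CNIC), block cos starts at cos * n and is n queues wide, so the absolute queue for (tc, q) is tc * n + q. A quick sketch with example sizes:

#include <stdio.h>

int main(void)
{
	unsigned int num_eth_queues = 4;	/* example RSS queue count */
	unsigned int max_cos = 3;		/* example number of classes */
	unsigned int cos;

	for (cos = 0; cos < max_cos; cos++) {
		unsigned int offset = cos * num_eth_queues;

		/* the queue range handed to netdev_set_tc_queue() */
		printf("tc %u -> queues [%u..%u]\n",
		       cos, offset, offset + num_eth_queues - 1);
	}
	/* absolute txq for (tc, per-class queue q) is tc * n + q */
	printf("(tc 2, q 1) -> txq %u\n", 2 * num_eth_queues + 1);
	return 0;
}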
4000
9f6c9258
DK
4001/* called with rtnl_lock */
4002int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4003{
4004 struct sockaddr *addr = p;
4005 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4006 int rc = 0;
9f6c9258 4007
51c1a580
MS
4008 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4009 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4010 return -EINVAL;
51c1a580 4011 }
614c76df 4012
a3348722
BW
4013 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4014 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4015 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4016 return -EINVAL;
51c1a580 4017 }
9f6c9258 4018
619c5cb6
VZ
4019 if (netif_running(dev)) {
4020 rc = bnx2x_set_eth_mac(bp, false);
4021 if (rc)
4022 return rc;
4023 }
4024
9f6c9258 4025 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4026
523224a3 4027 if (netif_running(dev))
619c5cb6 4028 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4029
619c5cb6 4030 return rc;
9f6c9258
DK
4031}
4032
b3b83c3f
DK
4033static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4034{
4035 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4036 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4037 u8 cos;
b3b83c3f
DK
4038
4039 /* Common */
55c11941 4040
b3b83c3f
DK
4041 if (IS_FCOE_IDX(fp_index)) {
4042 memset(sb, 0, sizeof(union host_hc_status_block));
4043 fp->status_blk_mapping = 0;
b3b83c3f 4044 } else {
b3b83c3f 4045 /* status blocks */
619c5cb6 4046 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4047 BNX2X_PCI_FREE(sb->e2_sb,
4048 bnx2x_fp(bp, fp_index,
4049 status_blk_mapping),
4050 sizeof(struct host_hc_status_block_e2));
4051 else
4052 BNX2X_PCI_FREE(sb->e1x_sb,
4053 bnx2x_fp(bp, fp_index,
4054 status_blk_mapping),
4055 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4056 }
55c11941 4057
b3b83c3f
DK
4058 /* Rx */
4059 if (!skip_rx_queue(bp, fp_index)) {
4060 bnx2x_free_rx_bds(fp);
4061
4062 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4063 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4064 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4065 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4066 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4067
4068 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4069 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4070 sizeof(struct eth_fast_path_rx_cqe) *
4071 NUM_RCQ_BD);
4072
4073 /* SGE ring */
4074 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4075 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4076 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4077 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4078 }
4079
4080 /* Tx */
4081 if (!skip_tx_queue(bp, fp_index)) {
4082 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4083 for_each_cos_in_tx_queue(fp, cos) {
65565884 4084 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4085
51c1a580 4086 DP(NETIF_MSG_IFDOWN,
94f05b0f 4087 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4088 fp_index, cos, txdata->cid);
4089
4090 BNX2X_FREE(txdata->tx_buf_ring);
4091 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4092 txdata->tx_desc_mapping,
4093 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4094 }
b3b83c3f
DK
4095 }
4096 /* end of fastpath */
4097}
4098
55c11941
MS
4099void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4100{
4101 int i;
4102 for_each_cnic_queue(bp, i)
4103 bnx2x_free_fp_mem_at(bp, i);
4104}
4105
b3b83c3f
DK
4106void bnx2x_free_fp_mem(struct bnx2x *bp)
4107{
4108 int i;
55c11941 4109 for_each_eth_queue(bp, i)
b3b83c3f
DK
4110 bnx2x_free_fp_mem_at(bp, i);
4111}
4112
1191cb83 4113static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4114{
4115 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4116 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4117 bnx2x_fp(bp, index, sb_index_values) =
4118 (__le16 *)status_blk.e2_sb->sb.index_values;
4119 bnx2x_fp(bp, index, sb_running_index) =
4120 (__le16 *)status_blk.e2_sb->sb.running_index;
4121 } else {
4122 bnx2x_fp(bp, index, sb_index_values) =
4123 (__le16 *)status_blk.e1x_sb->sb.index_values;
4124 bnx2x_fp(bp, index, sb_running_index) =
4125 (__le16 *)status_blk.e1x_sb->sb.running_index;
4126 }
4127}
4128
1191cb83
ED
4129/* Returns the number of actually allocated BDs */
4130static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4131 int rx_ring_size)
4132{
4133 struct bnx2x *bp = fp->bp;
4134 u16 ring_prod, cqe_ring_prod;
4135 int i, failure_cnt = 0;
4136
4137 fp->rx_comp_cons = 0;
4138 cqe_ring_prod = ring_prod = 0;
4139
4140 /* This routine is called only during init, so
4141 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4142 */
4143 for (i = 0; i < rx_ring_size; i++) {
4144 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4145 failure_cnt++;
4146 continue;
4147 }
4148 ring_prod = NEXT_RX_IDX(ring_prod);
4149 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4150 WARN_ON(ring_prod <= (i - failure_cnt));
4151 }
4152
4153 if (failure_cnt)
4154 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4155 i - failure_cnt, fp->index);
4156
4157 fp->rx_bd_prod = ring_prod;
4158 /* Limit the CQE producer by the CQE ring size */
4159 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4160 cqe_ring_prod);
4161 fp->rx_pkt = fp->rx_calls = 0;
4162
15192a8c 4163 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4164
4165 return i - failure_cnt;
4166}
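The fill loop above tolerates allocation failures by simply not advancing the BD and CQE producers for a buffer that got no memory, so a failure shrinks the usable ring instead of leaving a hole. A toy model of that behaviour (the failing allocator is obviously fabricated):

#include <stdio.h>

#define RING_SIZE 8

/* Toy allocator: pretend every third buffer allocation fails. */
static int toy_alloc(int i)
{
	return (i % 3 == 2) ? -1 : 0;
}

int main(void)
{
	unsigned int ring_prod = 0, failure_cnt = 0;
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		if (toy_alloc(i) < 0) {
			failure_cnt++;
			continue;		/* producers are NOT advanced */
		}
		ring_prod++;			/* NEXT_RX_IDX() in the driver */
	}
	printf("allocated %u of %d rx buffers (%u failures)\n",
	       RING_SIZE - failure_cnt, RING_SIZE, failure_cnt);
	printf("rx_bd_prod = %u\n", ring_prod);
	return 0;
}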
4167
4168static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4169{
4170 int i;
4171
4172 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4173 struct eth_rx_cqe_next_page *nextpg;
4174
4175 nextpg = (struct eth_rx_cqe_next_page *)
4176 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4177 nextpg->addr_hi =
4178 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4179 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4180 nextpg->addr_lo =
4181 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4182 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4183 }
4184}
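The loop above turns the last CQE of every page into a next-page pointer: page i-1 (0-based) links to page (i % NUM_RCQ_RINGS), so the final page wraps back to page 0 and the completion ring is circular. A quick index-arithmetic demo with hypothetical page and descriptor counts:

#include <stdio.h>

#define NUM_RCQ_RINGS 4		/* hypothetical number of CQE pages */
#define RCQ_DESC_CNT  128	/* hypothetical CQEs per page */

int main(void)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		int link_idx = RCQ_DESC_CNT * i - 1;	/* last CQE of page i-1 */
		int next_page = i % NUM_RCQ_RINGS;	/* wraps to page 0 */

		printf("CQE %3d -> next-page pointer to page %d\n",
		       link_idx, next_page);
	}
	return 0;
}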
4185
b3b83c3f
DK
4186static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4187{
4188 union host_hc_status_block *sb;
4189 struct bnx2x_fastpath *fp = &bp->fp[index];
4190 int ring_size = 0;
6383c0b3 4191 u8 cos;
c2188952 4192 int rx_ring_size = 0;
b3b83c3f 4193
a3348722
BW
4194 if (!bp->rx_ring_size &&
4195 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4196 rx_ring_size = MIN_RX_SIZE_NONTPA;
4197 bp->rx_ring_size = rx_ring_size;
55c11941 4198 } else if (!bp->rx_ring_size) {
c2188952
VZ
4199 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4200
065f8b92
YM
4201 if (CHIP_IS_E3(bp)) {
4202 u32 cfg = SHMEM_RD(bp,
4203 dev_info.port_hw_config[BP_PORT(bp)].
4204 default_cfg);
4205
4206 /* Decrease ring size for 1G functions */
4207 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4208 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4209 rx_ring_size /= 10;
4210 }
d760fc37 4211
c2188952
VZ
4212 /* allocate at least the number of buffers required by the FW */
4213 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4214 MIN_RX_SIZE_TPA, rx_ring_size);
4215
4216 bp->rx_ring_size = rx_ring_size;
614c76df 4217 } else /* if rx_ring_size specified - use it */
c2188952 4218 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4219
04c46736
YM
4220 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4221
b3b83c3f
DK
4222 /* Common */
4223 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4224
b3b83c3f 4225 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4226 /* status blocks */
619c5cb6 4227 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4228 BNX2X_PCI_ALLOC(sb->e2_sb,
4229 &bnx2x_fp(bp, index, status_blk_mapping),
4230 sizeof(struct host_hc_status_block_e2));
4231 else
4232 BNX2X_PCI_ALLOC(sb->e1x_sb,
4233 &bnx2x_fp(bp, index, status_blk_mapping),
4234 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4235 }
8eef2af1
DK
4236
4237 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4238 * set shortcuts for it.
4239 */
4240 if (!IS_FCOE_IDX(index))
4241 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4242
4243 /* Tx */
4244 if (!skip_tx_queue(bp, index)) {
4245 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4246 for_each_cos_in_tx_queue(fp, cos) {
65565884 4247 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4248
51c1a580
MS
4249 DP(NETIF_MSG_IFUP,
4250 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4251 index, cos);
4252
4253 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4254 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4255 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4256 &txdata->tx_desc_mapping,
b3b83c3f 4257 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4258 }
b3b83c3f
DK
4259 }
4260
4261 /* Rx */
4262 if (!skip_rx_queue(bp, index)) {
4263 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4264 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4265 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4266 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4267 &bnx2x_fp(bp, index, rx_desc_mapping),
4268 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4269
4270 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4271 &bnx2x_fp(bp, index, rx_comp_mapping),
4272 sizeof(struct eth_fast_path_rx_cqe) *
4273 NUM_RCQ_BD);
4274
4275 /* SGE ring */
4276 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4277 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4278 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4279 &bnx2x_fp(bp, index, rx_sge_mapping),
4280 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4281 /* RX BD ring */
4282 bnx2x_set_next_page_rx_bd(fp);
4283
4284 /* CQ ring */
4285 bnx2x_set_next_page_rx_cq(fp);
4286
4287 /* BDs */
4288 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4289 if (ring_size < rx_ring_size)
4290 goto alloc_mem_err;
4291 }
4292
4293 return 0;
4294
4295/* handles low memory cases */
4296alloc_mem_err:
4297 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4298 index, ring_size);
4299 /* FW will drop all packets if the queue is not big enough,
4300 * so in these cases we disable the queue.
6383c0b3 4301 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4302 */
4303 if (ring_size < (fp->disable_tpa ?
eb722d7a 4304 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4305 /* release memory allocated for this queue */
4306 bnx2x_free_fp_mem_at(bp, index);
4307 return -ENOMEM;
4308 }
4309 return 0;
4310}
4311
55c11941
MS
4312int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4313{
4314 if (!NO_FCOE(bp))
4315 /* FCoE */
4316 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4317 /* we will fail the load process instead of marking
4318 * NO_FCOE_FLAG
4319 */
4320 return -ENOMEM;
4321
4322 return 0;
4323}
4324
b3b83c3f
DK
4325int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4326{
4327 int i;
4328
55c11941
MS
4329 /* 1. Allocate FP for leading - fatal if error
4330 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4331 */
4332
4333 /* leading */
4334 if (bnx2x_alloc_fp_mem_at(bp, 0))
4335 return -ENOMEM;
6383c0b3 4336
b3b83c3f
DK
4337 /* RSS */
4338 for_each_nondefault_eth_queue(bp, i)
4339 if (bnx2x_alloc_fp_mem_at(bp, i))
4340 break;
4341
4342 /* handle memory failures */
4343 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4344 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4345
4346 WARN_ON(delta < 0);
4864a16a 4347 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4348 if (CNIC_SUPPORT(bp))
4349 /* move non eth FPs next to last eth FP
4350 * must be done in that order
4351 * FCOE_IDX < FWD_IDX < OOO_IDX
4352 */
b3b83c3f 4353
55c11941
MS
4354 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4355 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4356 bp->num_ethernet_queues -= delta;
4357 bp->num_queues = bp->num_ethernet_queues +
4358 bp->num_cnic_queues;
b3b83c3f
DK
4359 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4360 bp->num_queues + delta, bp->num_queues);
4361 }
4362
4363 return 0;
4364}
d6214d7a 4365
523224a3
DK
4366void bnx2x_free_mem_bp(struct bnx2x *bp)
4367{
c3146eb6
DK
4368 int i;
4369
4370 for (i = 0; i < bp->fp_array_size; i++)
4371 kfree(bp->fp[i].tpa_info);
523224a3 4372 kfree(bp->fp);
15192a8c
BW
4373 kfree(bp->sp_objs);
4374 kfree(bp->fp_stats);
65565884 4375 kfree(bp->bnx2x_txq);
523224a3
DK
4376 kfree(bp->msix_table);
4377 kfree(bp->ilt);
4378}
4379
0329aba1 4380int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4381{
4382 struct bnx2x_fastpath *fp;
4383 struct msix_entry *tbl;
4384 struct bnx2x_ilt *ilt;
6383c0b3 4385 int msix_table_size = 0;
55c11941 4386 int fp_array_size, txq_array_size;
15192a8c 4387 int i;
6383c0b3
AE
4388
4389 /*
4390 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4391 * path IGU SBs plus the default SB (for PF only).
6383c0b3 4392 */
1ab4434c
AE
4393 msix_table_size = bp->igu_sb_cnt;
4394 if (IS_PF(bp))
4395 msix_table_size++;
4396 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4397
6383c0b3 4398 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4399 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4400 bp->fp_array_size = fp_array_size;
4401 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4402
c3146eb6 4403 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4404 if (!fp)
4405 goto alloc_err;
c3146eb6 4406 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4407 fp[i].tpa_info =
4408 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4409 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4410 if (!(fp[i].tpa_info))
4411 goto alloc_err;
4412 }
4413
523224a3
DK
4414 bp->fp = fp;
4415
15192a8c 4416 /* allocate sp objs */
c3146eb6 4417 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4418 GFP_KERNEL);
4419 if (!bp->sp_objs)
4420 goto alloc_err;
4421
4422 /* allocate fp_stats */
c3146eb6 4423 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4424 GFP_KERNEL);
4425 if (!bp->fp_stats)
4426 goto alloc_err;
4427
65565884 4428 /* Allocate memory for the transmission queues array */
55c11941
MS
4429 txq_array_size =
4430 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4431 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4432
4433 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4434 GFP_KERNEL);
65565884
MS
4435 if (!bp->bnx2x_txq)
4436 goto alloc_err;
4437
523224a3 4438 /* msix table */
01e23742 4439 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4440 if (!tbl)
4441 goto alloc_err;
4442 bp->msix_table = tbl;
4443
4444 /* ilt */
4445 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4446 if (!ilt)
4447 goto alloc_err;
4448 bp->ilt = ilt;
4449
4450 return 0;
4451alloc_err:
4452 bnx2x_free_mem_bp(bp);
4453 return -ENOMEM;
4454
4455}
4456
a9fccec7 4457int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4458{
4459 struct bnx2x *bp = netdev_priv(dev);
4460
4461 if (unlikely(!netif_running(dev)))
4462 return 0;
4463
5d07d868 4464 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4465 return bnx2x_nic_load(bp, LOAD_NORMAL);
4466}
4467
1ac9e428
YR
4468int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4469{
4470 u32 sel_phy_idx = 0;
4471 if (bp->link_params.num_phys <= 1)
4472 return INT_PHY;
4473
4474 if (bp->link_vars.link_up) {
4475 sel_phy_idx = EXT_PHY1;
4476 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4477 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4478 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4479 sel_phy_idx = EXT_PHY2;
4480 } else {
4481
4482 switch (bnx2x_phy_selection(&bp->link_params)) {
4483 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4484 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4485 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4486 sel_phy_idx = EXT_PHY1;
4487 break;
4488 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4489 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4490 sel_phy_idx = EXT_PHY2;
4491 break;
4492 }
4493 }
4494
4495 return sel_phy_idx;
4496
4497}
4498int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4499{
4500 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4501 /*
2de67439 4502 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4503 * swapping is enabled). So when swapping is enabled, we need to reverse
4504 * the configuration
4505 */
4506
4507 if (bp->link_params.multi_phy_config &
4508 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4509 if (sel_phy_idx == EXT_PHY1)
4510 sel_phy_idx = EXT_PHY2;
4511 else if (sel_phy_idx == EXT_PHY2)
4512 sel_phy_idx = EXT_PHY1;
4513 }
4514 return LINK_CONFIG_IDX(sel_phy_idx);
4515}
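The swap handling above amounts to mirroring EXT_PHY1 and EXT_PHY2 when PHY swapping is enabled, leaving the internal PHY untouched. A tiny sketch of that reversal (the enum ordering is illustrative only):

#include <stdio.h>

enum { INT_PHY, EXT_PHY1, EXT_PHY2 };	/* illustrative ordering */

static int apply_phy_swap(int sel, int swapped)
{
	if (!swapped)
		return sel;
	if (sel == EXT_PHY1)
		return EXT_PHY2;
	if (sel == EXT_PHY2)
		return EXT_PHY1;
	return sel;			/* the internal PHY is never swapped */
}

int main(void)
{
	printf("EXT_PHY1, swapped     -> %d (EXT_PHY2 = %d)\n",
	       apply_phy_swap(EXT_PHY1, 1), EXT_PHY2);
	printf("EXT_PHY1, not swapped -> %d\n", apply_phy_swap(EXT_PHY1, 0));
	return 0;
}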
4516
55c11941 4517#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4518int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4519{
4520 struct bnx2x *bp = netdev_priv(dev);
4521 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4522
4523 switch (type) {
4524 case NETDEV_FCOE_WWNN:
4525 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4526 cp->fcoe_wwn_node_name_lo);
4527 break;
4528 case NETDEV_FCOE_WWPN:
4529 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4530 cp->fcoe_wwn_port_name_lo);
4531 break;
4532 default:
51c1a580 4533 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4534 return -EINVAL;
4535 }
4536
4537 return 0;
4538}
4539#endif
4540
9f6c9258
DK
4541/* called with rtnl_lock */
4542int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4543{
4544 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4545
4546 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4547 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4548 return -EAGAIN;
4549 }
4550
4551 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4552 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4553 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4554 return -EINVAL;
51c1a580 4555 }
9f6c9258
DK
4556
4557 /* This does not race with packet allocation
4558 * because the actual alloc size is
4559 * only updated as part of load
4560 */
4561 dev->mtu = new_mtu;
4562
66371c44
MM
4563 return bnx2x_reload_if_running(dev);
4564}
4565
c8f44aff 4566netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4567 netdev_features_t features)
66371c44
MM
4568{
4569 struct bnx2x *bp = netdev_priv(dev);
4570
4571 /* TPA requires Rx CSUM offloading */
621b4d66 4572 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4573 features &= ~NETIF_F_LRO;
621b4d66
DK
4574 features &= ~NETIF_F_GRO;
4575 }
66371c44
MM
4576
4577 return features;
4578}
4579
c8f44aff 4580int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4581{
4582 struct bnx2x *bp = netdev_priv(dev);
4583 u32 flags = bp->flags;
538dd2e3 4584 bool bnx2x_reload = false;
66371c44
MM
4585
4586 if (features & NETIF_F_LRO)
4587 flags |= TPA_ENABLE_FLAG;
4588 else
4589 flags &= ~TPA_ENABLE_FLAG;
4590
621b4d66
DK
4591 if (features & NETIF_F_GRO)
4592 flags |= GRO_ENABLE_FLAG;
4593 else
4594 flags &= ~GRO_ENABLE_FLAG;
4595
538dd2e3
MB
4596 if (features & NETIF_F_LOOPBACK) {
4597 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4598 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4599 bnx2x_reload = true;
4600 }
4601 } else {
4602 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4603 bp->link_params.loopback_mode = LOOPBACK_NONE;
4604 bnx2x_reload = true;
4605 }
4606 }
4607
66371c44
MM
4608 if (flags ^ bp->flags) {
4609 bp->flags = flags;
538dd2e3
MB
4610 bnx2x_reload = true;
4611 }
66371c44 4612
538dd2e3 4613 if (bnx2x_reload) {
66371c44
MM
4614 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4615 return bnx2x_reload_if_running(dev);
4616 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4617 }
4618
66371c44 4619 return 0;
9f6c9258
DK
4620}
4621
4622void bnx2x_tx_timeout(struct net_device *dev)
4623{
4624 struct bnx2x *bp = netdev_priv(dev);
4625
4626#ifdef BNX2X_STOP_ON_ERROR
4627 if (!bp->panic)
4628 bnx2x_panic();
4629#endif
7be08a72
AE
4630
4631 smp_mb__before_clear_bit();
4632 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4633 smp_mb__after_clear_bit();
4634
9f6c9258 4635 /* This allows the netif to be shut down gracefully before resetting */
7be08a72 4636 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4637}
4638
9f6c9258
DK
4639int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4640{
4641 struct net_device *dev = pci_get_drvdata(pdev);
4642 struct bnx2x *bp;
4643
4644 if (!dev) {
4645 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4646 return -ENODEV;
4647 }
4648 bp = netdev_priv(dev);
4649
4650 rtnl_lock();
4651
4652 pci_save_state(pdev);
4653
4654 if (!netif_running(dev)) {
4655 rtnl_unlock();
4656 return 0;
4657 }
4658
4659 netif_device_detach(dev);
4660
5d07d868 4661 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4662
4663 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4664
4665 rtnl_unlock();
4666
4667 return 0;
4668}
4669
4670int bnx2x_resume(struct pci_dev *pdev)
4671{
4672 struct net_device *dev = pci_get_drvdata(pdev);
4673 struct bnx2x *bp;
4674 int rc;
4675
4676 if (!dev) {
4677 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4678 return -ENODEV;
4679 }
4680 bp = netdev_priv(dev);
4681
4682 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4683 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4684 return -EAGAIN;
4685 }
4686
4687 rtnl_lock();
4688
4689 pci_restore_state(pdev);
4690
4691 if (!netif_running(dev)) {
4692 rtnl_unlock();
4693 return 0;
4694 }
4695
4696 bnx2x_set_power_state(bp, PCI_D0);
4697 netif_device_attach(dev);
4698
4699 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4700
4701 rtnl_unlock();
4702
4703 return rc;
4704}
619c5cb6
VZ
4705
4706
4707void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4708 u32 cid)
4709{
4710 /* ustorm cxt validation */
4711 cxt->ustorm_ag_context.cdu_usage =
4712 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4713 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4714 /* xcontext validation */
4715 cxt->xstorm_ag_context.cdu_reserved =
4716 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4717 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4718}
4719
1191cb83
ED
4720static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4721 u8 fw_sb_id, u8 sb_index,
4722 u8 ticks)
619c5cb6
VZ
4723{
4724
4725 u32 addr = BAR_CSTRORM_INTMEM +
4726 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4727 REG_WR8(bp, addr, ticks);
51c1a580
MS
4728 DP(NETIF_MSG_IFUP,
4729 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4730 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4731}
4732
1191cb83
ED
4733static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4734 u16 fw_sb_id, u8 sb_index,
4735 u8 disable)
619c5cb6
VZ
4736{
4737 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4738 u32 addr = BAR_CSTRORM_INTMEM +
4739 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4740 u16 flags = REG_RD16(bp, addr);
4741 /* clear and set */
4742 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4743 flags |= enable_flag;
4744 REG_WR16(bp, addr, flags);
51c1a580
MS
4745 DP(NETIF_MSG_IFUP,
4746 "port %x fw_sb_id %d sb_index %d disable %d\n",
4747 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4748}
4749
4750void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4751 u8 sb_index, u8 disable, u16 usec)
4752{
4753 int port = BP_PORT(bp);
4754 u8 ticks = usec / BNX2X_BTR;
4755
4756 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4757
4758 disable = disable ? 1 : (usec ? 0 : 1);
4759 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4760}
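Finally, the coalescing update above programs the timeout in BNX2X_BTR-sized ticks and forces the disable bit whenever the caller either asks for it or passes a zero timeout. A small worked sketch of both expressions (the BNX2X_BTR value below is a placeholder, not the driver's actual constant):

#include <stdio.h>

#define BNX2X_BTR 4	/* placeholder tick width, in microseconds */

static unsigned int hc_disable(unsigned int disable_req, unsigned int usec)
{
	/* an explicit request or a zero timeout both disable coalescing */
	return disable_req ? 1 : (usec ? 0 : 1);
}

int main(void)
{
	unsigned int usec = 100;

	printf("usec=%u   -> ticks %u, disable %u\n",
	       usec, usec / BNX2X_BTR, hc_disable(0, usec));
	printf("usec=0     -> disable %u\n", hc_disable(0, 0));
	printf("forced off -> disable %u\n", hc_disable(1, usec));
	return 0;
}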