bnx2x: Add Private Flags Support
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/ip6_checksum.h>
27#include <linux/prefetch.h>
28#include "bnx2x_cmn.h"
29#include "bnx2x_init.h"
30#include "bnx2x_sp.h"
31
32/**
33 * bnx2x_move_fp - move content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @from: source FP index
37 * @to: destination FP index
38 *
39 * Makes sure the contents of bp->fp[to].napi are kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem copying the entire
42 * source onto the target. Update txdata pointers and related
43 * content.
44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
55
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
58
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
62
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86}
87
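/* Illustrative note on the index math above: when delta ETH queues are
 * dropped, the moved FCoE txdata slot goes back by max_cos * delta entries,
 * e.g. with max_cos = 3 and one queue removed it moves back 3 slots, which is
 * exactly what old/new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET compute.
 */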
88/**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97{
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
6411280a 112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113 }
114}
115
116/**
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118 *
119 * @bp: driver handle
120 * @delta: number of eth queues which were not allocated
121 */
122static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123{
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overridden
128 */
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
133
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137 }
138 }
139}
140
141int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143/* free skb in the packet ring at pos idx
144 * return idx of last bd freed
145 */
6383c0b3 146static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
9f6c9258 149{
6383c0b3 150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155 int nbd;
156
157 /* prefetch skb end pointer to speedup dev_kfree_skb() */
158 prefetch(&skb->end);
159
51c1a580 160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 161 txdata->txq_index, idx, tx_buf, skb);
162
163 /* unmap first bd */
6383c0b3 164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
167
168
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
173 bnx2x_panic();
174 }
175#endif
176 new_cons = nbd + tx_buf->first_bd;
177
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181 /* Skip a parse bd... */
182 --nbd;
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 --nbd;
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 }
190
191 /* now free frags */
192 while (nbd > 0) {
193
6383c0b3 194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 if (--nbd)
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199 }
200
201 /* release skb */
202 WARN_ON(!skb);
d8290ae5 203 if (likely(skb)) {
204 (*pkts_compl)++;
205 (*bytes_compl) += skb->len;
206 }
207
40955532 208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
210 tx_buf->skb = NULL;
211
212 return new_cons;
213}
214
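/* Illustrative note on the BD chain unwound by bnx2x_free_tx_pkt() above:
 *
 *   start BD -> parse BD -> [TSO split header BD] -> frag BD ... frag BD
 *
 * Only the start BD and the frag BDs carry DMA mappings; the parse BD and the
 * optional TSO split header BD are skipped without unmapping.
 */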
6383c0b3 215int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 216{
9f6c9258 217 struct netdev_queue *txq;
6383c0b3 218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 219 unsigned int pkts_compl = 0, bytes_compl = 0;
220
221#ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
223 return -1;
224#endif
225
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
229
230 while (sw_cons != hw_cons) {
231 u16 pkt_cons;
232
233 pkt_cons = TX_BD(sw_cons);
234
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
238
2df1a70a 239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 240 &pkts_compl, &bytes_compl);
241
242 sw_cons++;
243 }
244
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
249
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
254 * forever.
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
258 */
259 smp_mb();
260
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 *
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
269 * stops the queue
270 */
271
272 __netif_tx_lock(txq, smp_processor_id());
273
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
278
279 __netif_tx_unlock(txq);
280 }
281 return 0;
282}
283
284static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285 u16 idx)
286{
287 u16 last_max = fp->last_max_sge;
288
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
291}
292
293static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 u16 sge_len,
295 struct eth_end_agg_rx_cqe *cqe)
296{
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
299 u16 delta = 0;
300 u16 i;
301
302 if (!sge_len)
303 return;
304
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
619c5cb6 307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
309
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
312
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
523224a3 315 bnx2x_update_last_max_sge(fp,
621b4d66 316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
317
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
321
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
324 last_elem++;
325
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
329 break;
330
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
333 }
334
335 if (delta > 0) {
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
339 }
340
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
344}
345
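/* Illustrative note: fp->sge_mask is a bit vector with one bit per SGE entry.
 * bnx2x_update_sge_prod() clears the bits of the pages consumed by the
 * aggregation, then advances rx_sge_prod only across 64-bit mask elements
 * that are fully consumed (all zero), re-arming each to all-ones as it goes.
 */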
2de67439 346/* Get Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
348 */
349static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
351 bool *l4_rxhash)
e52fcb24 352{
2de67439 353 /* Get Toeplitz hash from CQE */
e52fcb24 354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
357
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 361 return le32_to_cpu(cqe->rss_hash_result);
362 }
363 *l4_rxhash = false;
364 return 0;
365}
366
9f6c9258 367static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 368 u16 cons, u16 prod,
619c5cb6 369 struct eth_fast_path_rx_cqe *cqe)
370{
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 dma_addr_t mapping;
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
378
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
e52fcb24 383 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 384 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
387 /*
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
391 */
392
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
e52fcb24 395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397 return;
398 }
399
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
619c5cb6 402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
409
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421 tpa_info->gro_size = gro_size;
422 }
423
424#ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426#ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428#else
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430#endif
431 fp->tpa_queue_used);
432#endif
433}
434
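/* Illustrative note: on TPA_START the spare buffer kept in
 * tpa_info->first_buf is mapped and published at the producer index, while
 * the consumer buffer holding the first part of the aggregation is parked in
 * first_buf (still mapped) until TPA_STOP. If the mapping fails, the state is
 * set to BNX2X_TPA_ERROR so the aggregation is dropped when TPA_STOP arrives.
 */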
435/* Timestamp option length allowed for TPA aggregation:
436 *
437 * nop nop kind length echo val
438 */
439#define TPA_TSTAMP_OPT_LEN 12
440/**
cbf1de72 441 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 442 *
cbf1de72 443 * @skb: packet skb
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
446 * aggregation.
cbf1de72 447 * @pkt_len: length of all segments
448 *
449 * Approximate value of the MSS for this aggregation, calculated using
450 * its first packet.
2de67439 451 * Compute number of aggregated segments, and gso_type.
e4e3c02a 452 */
cbf1de72 453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
e4e3c02a 456{
cbf1de72 457 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 458 * other than timestamp or IPv6 extension headers.
e4e3c02a 459 */
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 463 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 464 hdrs_len += sizeof(struct ipv6hdr);
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 } else {
619c5cb6 467 hdrs_len += sizeof(struct iphdr);
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469 }
470
471 /* Check if there was a TCP timestamp; if there is, it will
472 * always be 12 bytes long: nop nop kind length echo val.
473 *
474 * Otherwise FW would close the aggregation.
475 */
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
483 */
ab5777d7 484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
485}
486
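/* Worked example (illustrative) for the gso_size math above: for an IPv4
 * aggregation with TCP timestamps, hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) +
 * 20 (iphdr) + 12 (timestamp option) = 66, so gso_size = len_on_bd - 66.
 * An IPv6 aggregation without timestamps would use 14 + 20 + 40 = 74.
 */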
487static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
489{
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493 dma_addr_t mapping;
494
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
497 return -ENOMEM;
498 }
499
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 501 SGE_PAGES, DMA_FROM_DEVICE);
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
505 return -ENOMEM;
506 }
507
508 sw_buf->page = page;
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514 return 0;
515}
516
9f6c9258 517static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518 struct bnx2x_agg_info *tpa_info,
519 u16 pages,
520 struct sk_buff *skb,
521 struct eth_end_agg_rx_cqe *cqe,
522 u16 cqe_idx)
523{
524 struct sw_rx_page *rx_pg, old_rx_pg;
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
619c5cb6 527 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 528 u16 full_page = 0, gro_size = 0;
529
619c5cb6 530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
531
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
535 }
536
537 /* This is needed in order to enable forwarding support */
538 if (frag_size)
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
542
9f6c9258 543#ifdef BNX2X_STOP_ON_ERROR
924d75ab 544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 pages, cqe_idx);
619c5cb6 547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
548 bnx2x_panic();
549 return -EINVAL;
550 }
551#endif
552
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
556
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
561 else /* LRO */
924d75ab 562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
563
564 rx_pg = &fp->rx_page_ring[sge_idx];
565 old_rx_pg = *rx_pg;
566
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570 if (unlikely(err)) {
15192a8c 571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
572 return err;
573 }
574
575 /* Unmap the page as we are going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 578 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 579 /* Add one frag and update the appropriate fields in the skb */
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582 else { /* GRO */
583 int rem;
584 int offset = 0;
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
589 if (offset)
590 get_page(old_rx_pg.page);
591 offset += len;
592 }
593 }
594
595 skb->data_len += frag_len;
924d75ab 596 skb->truesize += SGE_PAGES;
597 skb->len += frag_len;
598
599 frag_size -= frag_len;
600 }
601
602 return 0;
603}
604
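/* Illustrative note: in GRO mode each SGE page above is handed to the stack
 * as several gro_size-sized fragments of the same page, with get_page() taken
 * for every fragment after the first so each skb frag holds its own reference.
 */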
605static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606{
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
609 else
610 kfree(data);
611}
612
613static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614{
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
617
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619}
620
621#ifdef CONFIG_INET
622static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623{
624 const struct iphdr *iph = ip_hdr(skb);
625 struct tcphdr *th;
626
627 skb_set_transport_header(skb, sizeof(struct iphdr));
628 th = tcp_hdr(skb);
629
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
632}
633
634static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635{
636 struct ipv6hdr *iph = ipv6_hdr(skb);
637 struct tcphdr *th;
638
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640 th = tcp_hdr(skb);
641
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
644}
645
646static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
647 void (*gro_func)(struct bnx2x*, struct sk_buff*))
648{
649 skb_set_network_header(skb, 0);
650 gro_func(bp, skb);
651 tcp_gro_complete(skb);
652}
653#endif
654
655static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
656 struct sk_buff *skb)
657{
658#ifdef CONFIG_INET
cbf1de72 659 if (skb_shinfo(skb)->gso_size) {
660 switch (be16_to_cpu(skb->protocol)) {
661 case ETH_P_IP:
2c2d06d5 662 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
663 break;
664 case ETH_P_IPV6:
2c2d06d5 665 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
666 break;
667 default:
2c2d06d5 668 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
669 be16_to_cpu(skb->protocol));
670 }
671 }
672#endif
673 napi_gro_receive(&fp->napi, skb);
674}
675
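/* Illustrative note: the helpers above appear to rebuild the TCP pseudo-header
 * checksum (~tcp_v4_check()/~tcp_v6_check() with a zero payload sum) before
 * calling tcp_gro_complete(), mirroring what software GRO leaves in th->check.
 */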
676static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
677 struct bnx2x_agg_info *tpa_info,
678 u16 pages,
679 struct eth_end_agg_rx_cqe *cqe,
680 u16 cqe_idx)
9f6c9258 681{
619c5cb6 682 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 683 u8 pad = tpa_info->placement_offset;
619c5cb6 684 u16 len = tpa_info->len_on_bd;
e52fcb24 685 struct sk_buff *skb = NULL;
621b4d66 686 u8 *new_data, *data = rx_buf->data;
687 u8 old_tpa_state = tpa_info->tpa_state;
688
689 tpa_info->tpa_state = BNX2X_TPA_STOP;
690
691 /* If there was an error during the handling of the TPA_START -
692 * drop this aggregation.
693 */
694 if (old_tpa_state == BNX2X_TPA_ERROR)
695 goto drop;
696
e52fcb24 697 /* Try to allocate the new data */
d46d132c 698 new_data = bnx2x_frag_alloc(fp);
699 /* Unmap skb in the pool anyway, as we are going to change
700 pool entry status to BNX2X_TPA_STOP even if new skb allocation
701 fails. */
702 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 703 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 704 if (likely(new_data))
d46d132c 705 skb = build_skb(data, fp->rx_frag_size);
706
e52fcb24 707 if (likely(skb)) {
9f6c9258 708#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 709 if (pad + len > fp->rx_buf_size) {
51c1a580 710 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 711 pad, len, fp->rx_buf_size);
712 bnx2x_panic();
713 return;
714 }
715#endif
716
e52fcb24 717 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 718 skb_put(skb, len);
e52fcb24 719 skb->rxhash = tpa_info->rxhash;
a334b5fb 720 skb->l4_rxhash = tpa_info->l4_rxhash;
721
722 skb->protocol = eth_type_trans(skb, bp->dev);
723 skb->ip_summed = CHECKSUM_UNNECESSARY;
724
725 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
726 skb, cqe, cqe_idx)) {
619c5cb6 727 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 728 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 729 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 730 } else {
731 DP(NETIF_MSG_RX_STATUS,
732 "Failed to allocate new pages - dropping packet!\n");
40955532 733 dev_kfree_skb_any(skb);
734 }
735
736
737 /* put new data in bin */
738 rx_buf->data = new_data;
739
619c5cb6 740 return;
9f6c9258 741 }
d46d132c 742 bnx2x_frag_free(fp, new_data);
743drop:
744 /* drop the packet and keep the buffer in the bin */
745 DP(NETIF_MSG_RX_STATUS,
746 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 747 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
748}
749
750static int bnx2x_alloc_rx_data(struct bnx2x *bp,
751 struct bnx2x_fastpath *fp, u16 index)
752{
753 u8 *data;
754 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
755 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
756 dma_addr_t mapping;
757
d46d132c 758 data = bnx2x_frag_alloc(fp);
759 if (unlikely(data == NULL))
760 return -ENOMEM;
761
762 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
763 fp->rx_buf_size,
764 DMA_FROM_DEVICE);
765 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 766 bnx2x_frag_free(fp, data);
767 BNX2X_ERR("Can't map rx data\n");
768 return -ENOMEM;
769 }
770
771 rx_buf->data = data;
772 dma_unmap_addr_set(rx_buf, mapping, mapping);
773
774 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
775 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
776
777 return 0;
778}
779
780static
781void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
782 struct bnx2x_fastpath *fp,
783 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 784{
785 /* Do nothing if no L4 csum validation was done.
786 * We do not check whether IP csum was validated. For IPv4 we assume
787 * that if the card got as far as validating the L4 csum, it also
788 * validated the IP csum. IPv6 has no IP csum.
789 */
d6cb3e41 790 if (cqe->fast_path_cqe.status_flags &
e488921f 791 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
792 return;
793
e488921f 794 /* If L4 validation was done, check if an error was found. */
795
796 if (cqe->fast_path_cqe.type_error_flags &
797 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
798 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 799 qstats->hw_csum_err++;
800 else
801 skb->ip_summed = CHECKSUM_UNNECESSARY;
802}
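/* Summary note (illustrative): skb->ip_summed stays CHECKSUM_NONE when the HW
 * skipped L4 validation or reported an IP/L4 checksum error (the error case
 * also bumps qstats->hw_csum_err); only a clean L4 validation yields
 * CHECKSUM_UNNECESSARY.
 */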
803
804int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
805{
806 struct bnx2x *bp = fp->bp;
807 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
808 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
809 int rx_pkt = 0;
810
811#ifdef BNX2X_STOP_ON_ERROR
812 if (unlikely(bp->panic))
813 return 0;
814#endif
815
816 /* CQ "next element" is of the size of the regular element,
817 that's why it's ok here */
818 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
819 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
820 hw_comp_cons++;
821
822 bd_cons = fp->rx_bd_cons;
823 bd_prod = fp->rx_bd_prod;
824 bd_prod_fw = bd_prod;
825 sw_comp_cons = fp->rx_comp_cons;
826 sw_comp_prod = fp->rx_comp_prod;
827
828 /* Memory barrier necessary as speculative reads of the rx
829 * buffer can be ahead of the index in the status block
830 */
831 rmb();
832
833 DP(NETIF_MSG_RX_STATUS,
834 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
835 fp->index, hw_comp_cons, sw_comp_cons);
836
837 while (sw_comp_cons != hw_comp_cons) {
838 struct sw_rx_bd *rx_buf = NULL;
839 struct sk_buff *skb;
840 union eth_rx_cqe *cqe;
619c5cb6 841 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 842 u8 cqe_fp_flags;
619c5cb6 843 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 844 u16 len, pad, queue;
e52fcb24 845 u8 *data;
a334b5fb 846 bool l4_rxhash;
847
848#ifdef BNX2X_STOP_ON_ERROR
849 if (unlikely(bp->panic))
850 return 0;
851#endif
852
853 comp_ring_cons = RCQ_BD(sw_comp_cons);
854 bd_prod = RX_BD(bd_prod);
855 bd_cons = RX_BD(bd_cons);
856
9f6c9258 857 cqe = &fp->rx_comp_ring[comp_ring_cons];
858 cqe_fp = &cqe->fast_path_cqe;
859 cqe_fp_flags = cqe_fp->type_error_flags;
860 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
861
862 DP(NETIF_MSG_RX_STATUS,
863 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
864 CQE_TYPE(cqe_fp_flags),
865 cqe_fp_flags, cqe_fp->status_flags,
866 le32_to_cpu(cqe_fp->rss_hash_result),
867 le16_to_cpu(cqe_fp->vlan_tag),
868 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
869
870 /* is this a slowpath msg? */
619c5cb6 871 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
872 bnx2x_sp_event(fp, cqe);
873 goto next_cqe;
e52fcb24 874 }
875
876 rx_buf = &fp->rx_buf_ring[bd_cons];
877 data = rx_buf->data;
878
e52fcb24 879 if (!CQE_TYPE_FAST(cqe_fp_type)) {
880 struct bnx2x_agg_info *tpa_info;
881 u16 frag_size, pages;
619c5cb6 882#ifdef BNX2X_STOP_ON_ERROR
883 /* sanity check */
884 if (fp->disable_tpa &&
885 (CQE_TYPE_START(cqe_fp_type) ||
886 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 887 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 888 CQE_TYPE(cqe_fp_type));
619c5cb6 889#endif
890
891 if (CQE_TYPE_START(cqe_fp_type)) {
892 u16 queue = cqe_fp->queue_index;
893 DP(NETIF_MSG_RX_STATUS,
894 "calling tpa_start on queue %d\n",
895 queue);
896
897 bnx2x_tpa_start(fp, queue,
898 bd_cons, bd_prod,
899 cqe_fp);
900
e52fcb24 901 goto next_rx;
902
903 }
904 queue = cqe->end_agg_cqe.queue_index;
905 tpa_info = &fp->tpa_info[queue];
906 DP(NETIF_MSG_RX_STATUS,
907 "calling tpa_stop on queue %d\n",
908 queue);
909
910 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
911 tpa_info->len_on_bd;
912
913 if (fp->mode == TPA_MODE_GRO)
914 pages = (frag_size + tpa_info->full_page - 1) /
915 tpa_info->full_page;
916 else
917 pages = SGE_PAGE_ALIGN(frag_size) >>
918 SGE_PAGE_SHIFT;
919
920 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
921 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 922#ifdef BNX2X_STOP_ON_ERROR
923 if (bp->panic)
924 return 0;
925#endif
926
927 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
928 goto next_cqe;
929 }
930 /* non TPA */
621b4d66 931 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
932 pad = cqe_fp->placement_offset;
933 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 934 dma_unmap_addr(rx_buf, mapping),
935 pad + RX_COPY_THRESH,
936 DMA_FROM_DEVICE);
937 pad += NET_SKB_PAD;
938 prefetch(data + pad); /* speedup eth_type_trans() */
939 /* is this an error packet? */
940 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 941 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
942 "ERROR flags %x rx packet %u\n",
943 cqe_fp_flags, sw_comp_cons);
15192a8c 944 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
945 goto reuse_rx;
946 }
947
948 /* Since we don't have a jumbo ring
949 * copy small packets if mtu > 1500
950 */
951 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
952 (len <= RX_COPY_THRESH)) {
953 skb = netdev_alloc_skb_ip_align(bp->dev, len);
954 if (skb == NULL) {
51c1a580 955 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 956 "ERROR packet dropped because of alloc failure\n");
15192a8c 957 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
958 goto reuse_rx;
959 }
960 memcpy(skb->data, data + pad, len);
961 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
962 } else {
963 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 964 dma_unmap_single(&bp->pdev->dev,
e52fcb24 965 dma_unmap_addr(rx_buf, mapping),
a8c94b91 966 fp->rx_buf_size,
9f6c9258 967 DMA_FROM_DEVICE);
d46d132c 968 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 969 if (unlikely(!skb)) {
d46d132c 970 bnx2x_frag_free(fp, data);
971 bnx2x_fp_qstats(bp, fp)->
972 rx_skb_alloc_failed++;
973 goto next_rx;
974 }
9f6c9258 975 skb_reserve(skb, pad);
9f6c9258 976 } else {
977 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
978 "ERROR packet dropped because of alloc failure\n");
15192a8c 979 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 980reuse_rx:
e52fcb24 981 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
982 goto next_rx;
983 }
036d2df9 984 }
985
986 skb_put(skb, len);
987 skb->protocol = eth_type_trans(skb, bp->dev);
988
989 /* Set Toeplitz hash for a non-LRO skb */
990 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
991 skb->l4_rxhash = l4_rxhash;
992
036d2df9 993 skb_checksum_none_assert(skb);
994
d6cb3e41 995 if (bp->dev->features & NETIF_F_RXCSUM)
996 bnx2x_csum_validate(skb, cqe, fp,
997 bnx2x_fp_qstats(bp, fp));
998
f233cafe 999 skb_record_rx_queue(skb, fp->rx_queue);
1000
1001 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1002 PARSING_FLAGS_VLAN)
86a9bad3 1003 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1004 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 1005 napi_gro_receive(&fp->napi, skb);
1006
1007
1008next_rx:
e52fcb24 1009 rx_buf->data = NULL;
1010
1011 bd_cons = NEXT_RX_IDX(bd_cons);
1012 bd_prod = NEXT_RX_IDX(bd_prod);
1013 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1014 rx_pkt++;
1015next_cqe:
1016 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1017 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1018
1019 if (rx_pkt == budget)
1020 break;
1021 } /* while */
1022
1023 fp->rx_bd_cons = bd_cons;
1024 fp->rx_bd_prod = bd_prod_fw;
1025 fp->rx_comp_cons = sw_comp_cons;
1026 fp->rx_comp_prod = sw_comp_prod;
1027
1028 /* Update producers */
1029 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1030 fp->rx_sge_prod);
1031
1032 fp->rx_pkt += rx_pkt;
1033 fp->rx_calls++;
1034
1035 return rx_pkt;
1036}
1037
1038static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1039{
1040 struct bnx2x_fastpath *fp = fp_cookie;
1041 struct bnx2x *bp = fp->bp;
6383c0b3 1042 u8 cos;
1043
1044 DP(NETIF_MSG_INTR,
1045 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1046 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1047
523224a3 1048 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1049
1050#ifdef BNX2X_STOP_ON_ERROR
1051 if (unlikely(bp->panic))
1052 return IRQ_HANDLED;
1053#endif
1054
1055 /* Handle Rx and Tx according to MSI-X vector */
1056 prefetch(fp->rx_cons_sb);
1057
1058 for_each_cos_in_tx_queue(fp, cos)
65565884 1059 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1060
523224a3 1061 prefetch(&fp->sb_running_index[SM_RX_ID]);
1062 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1063
1064 return IRQ_HANDLED;
1065}
1066
1067/* HW Lock for shared dual port PHYs */
1068void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1069{
1070 mutex_lock(&bp->port.phy_mutex);
1071
8203c4b6 1072 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1073}
1074
1075void bnx2x_release_phy_lock(struct bnx2x *bp)
1076{
8203c4b6 1077 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1078
1079 mutex_unlock(&bp->port.phy_mutex);
1080}
1081
1082/* calculates MF speed according to current linespeed and MF configuration */
1083u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1084{
1085 u16 line_speed = bp->link_vars.line_speed;
1086 if (IS_MF(bp)) {
1087 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1088 bp->mf_config[BP_VN(bp)]);
1089
1090 /* Calculate the current MAX line speed limit for the MF
1091 * devices
0793f83f 1092 */
1093 if (IS_MF_SI(bp))
1094 line_speed = (line_speed * maxCfg) / 100;
1095 else { /* SD mode */
1096 u16 vn_max_rate = maxCfg * 100;
1097
1098 if (vn_max_rate < line_speed)
1099 line_speed = vn_max_rate;
faa6fcbb 1100 }
1101 }
1102
1103 return line_speed;
1104}
1105
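/* Worked example (illustrative): with a 10000 Mbps link and maxCfg = 25,
 * SI mode reports 10000 * 25 / 100 = 2500 Mbps, while SD mode caps the
 * reported speed at vn_max_rate = 25 * 100 = 2500 Mbps.
 */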
1106/**
1107 * bnx2x_fill_report_data - fill link report data to report
1108 *
1109 * @bp: driver handle
1110 * @data: link state to update
1111 *
1112 * It uses non-atomic bit operations because it is called under the mutex.
1113 */
1114static void bnx2x_fill_report_data(struct bnx2x *bp,
1115 struct bnx2x_link_report_data *data)
1116{
1117 u16 line_speed = bnx2x_get_mf_speed(bp);
1118
1119 memset(data, 0, sizeof(*data));
1120
1121 /* Fill the report data: effective line speed */
1122 data->line_speed = line_speed;
1123
1124 /* Link is down */
1125 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1126 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1127 &data->link_report_flags);
1128
1129 /* Full DUPLEX */
1130 if (bp->link_vars.duplex == DUPLEX_FULL)
1131 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1132
1133 /* Rx Flow Control is ON */
1134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1135 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1136
1137 /* Tx Flow Control is ON */
1138 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1139 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1140}
1141
1142/**
1143 * bnx2x_link_report - report link status to OS.
1144 *
1145 * @bp: driver handle
1146 *
1147 * Calls the __bnx2x_link_report() under the same locking scheme
1148 * as a link/PHY state managing code to ensure a consistent link
1149 * reporting.
1150 */
1151
1152void bnx2x_link_report(struct bnx2x *bp)
1153{
1154 bnx2x_acquire_phy_lock(bp);
1155 __bnx2x_link_report(bp);
1156 bnx2x_release_phy_lock(bp);
1157}
1158
1159/**
1160 * __bnx2x_link_report - report link status to OS.
1161 *
1162 * @bp: driver handle
1163 *
1164 * Non-atomic implementation.
1165 * Should be called under the phy_lock.
1166 */
1167void __bnx2x_link_report(struct bnx2x *bp)
1168{
1169 struct bnx2x_link_report_data cur_data;
1170
2ae17f66 1171 /* reread mf_cfg */
ad5afc89 1172 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1173 bnx2x_read_mf_cfg(bp);
1174
1175 /* Read the current link report info */
1176 bnx2x_fill_report_data(bp, &cur_data);
1177
1178 /* Don't report link down or exactly the same link status twice */
1179 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1180 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1181 &bp->last_reported_link.link_report_flags) &&
1182 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1183 &cur_data.link_report_flags)))
1184 return;
1185
1186 bp->link_cnt++;
1187
1188 /* We are going to report new link parameters now -
1189 * remember the current data for the next time.
1190 */
1191 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1192
1193 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1194 &cur_data.link_report_flags)) {
1195 netif_carrier_off(bp->dev);
1196 netdev_err(bp->dev, "NIC Link is Down\n");
1197 return;
1198 } else {
1199 const char *duplex;
1200 const char *flow;
1201
2ae17f66 1202 netif_carrier_on(bp->dev);
1203
1204 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1205 &cur_data.link_report_flags))
94f05b0f 1206 duplex = "full";
9f6c9258 1207 else
94f05b0f 1208 duplex = "half";
1209
1210 /* Handle the FC at the end so that only these flags would be
1211 * possibly set. This way we may easily check if there is no FC
1212 * enabled.
1213 */
1214 if (cur_data.link_report_flags) {
1215 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1216 &cur_data.link_report_flags)) {
1217 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1218 &cur_data.link_report_flags))
1219 flow = "ON - receive & transmit";
1220 else
1221 flow = "ON - receive";
9f6c9258 1222 } else {
94f05b0f 1223 flow = "ON - transmit";
9f6c9258 1224 }
1225 } else {
1226 flow = "none";
9f6c9258 1227 }
1228 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1229 cur_data.line_speed, duplex, flow);
1230 }
1231}
1232
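/* Example of the resulting log line (values illustrative):
 *   "NIC Link is Up, 10000 Mbps full duplex, Flow control: ON - receive & transmit"
 */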
1233static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1234{
1235 int i;
1236
1237 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1238 struct eth_rx_sge *sge;
1239
1240 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1241 sge->addr_hi =
1242 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1243 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244
1245 sge->addr_lo =
1246 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1247 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1248 }
1249}
1250
1251static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1252 struct bnx2x_fastpath *fp, int last)
1253{
1254 int i;
1255
1256 for (i = 0; i < last; i++) {
1257 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1258 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1259 u8 *data = first_buf->data;
1260
1261 if (data == NULL) {
1262 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1263 continue;
1264 }
1265 if (tpa_info->tpa_state == BNX2X_TPA_START)
1266 dma_unmap_single(&bp->pdev->dev,
1267 dma_unmap_addr(first_buf, mapping),
1268 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1269 bnx2x_frag_free(fp, data);
1270 first_buf->data = NULL;
1271 }
1272}
1273
1274void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1275{
1276 int j;
1277
1278 for_each_rx_queue_cnic(bp, j) {
1279 struct bnx2x_fastpath *fp = &bp->fp[j];
1280
1281 fp->rx_bd_cons = 0;
1282
1283 /* Activate BD ring */
1284 /* Warning!
1285 * this will generate an interrupt (to the TSTORM)
1286 * must only be done after chip is initialized
1287 */
1288 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1289 fp->rx_sge_prod);
1290 }
1291}
1292
1293void bnx2x_init_rx_rings(struct bnx2x *bp)
1294{
1295 int func = BP_FUNC(bp);
523224a3 1296 u16 ring_prod;
9f6c9258 1297 int i, j;
1298
b3b83c3f 1299 /* Allocate TPA resources */
55c11941 1300 for_each_eth_queue(bp, j) {
523224a3 1301 struct bnx2x_fastpath *fp = &bp->fp[j];
1302
1303 DP(NETIF_MSG_IFUP,
1304 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1305
523224a3 1306 if (!fp->disable_tpa) {
1307 /* Fill the per-aggregation pool */
dfacf138 1308 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1309 struct bnx2x_agg_info *tpa_info =
1310 &fp->tpa_info[i];
1311 struct sw_rx_bd *first_buf =
1312 &tpa_info->first_buf;
1313
d46d132c 1314 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1315 if (!first_buf->data) {
1316 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1317 j);
1318 bnx2x_free_tpa_pool(bp, fp, i);
1319 fp->disable_tpa = 1;
1320 break;
1321 }
1322 dma_unmap_addr_set(first_buf, mapping, 0);
1323 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1324 }
1325
1326 /* "next page" elements initialization */
1327 bnx2x_set_next_page_sgl(fp);
1328
1329 /* set SGEs bit mask */
1330 bnx2x_init_sge_ring_bit_mask(fp);
1331
1332 /* Allocate SGEs and initialize the ring elements */
1333 for (i = 0, ring_prod = 0;
1334 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1335
1336 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1337 BNX2X_ERR("was only able to allocate %d rx sges\n",
1338 i);
1339 BNX2X_ERR("disabling TPA for queue[%d]\n",
1340 j);
523224a3 1341 /* Cleanup already allocated elements */
1342 bnx2x_free_rx_sge_range(bp, fp,
1343 ring_prod);
1344 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1345 MAX_AGG_QS(bp));
1346 fp->disable_tpa = 1;
1347 ring_prod = 0;
1348 break;
1349 }
1350 ring_prod = NEXT_SGE_IDX(ring_prod);
1351 }
1352
1353 fp->rx_sge_prod = ring_prod;
1354 }
1355 }
1356
55c11941 1357 for_each_eth_queue(bp, j) {
1358 struct bnx2x_fastpath *fp = &bp->fp[j];
1359
1360 fp->rx_bd_cons = 0;
1361
1362 /* Activate BD ring */
1363 /* Warning!
1364 * this will generate an interrupt (to the TSTORM)
1365 * must only be done after chip is initialized
1366 */
1367 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1368 fp->rx_sge_prod);
1369
1370 if (j != 0)
1371 continue;
1372
619c5cb6 1373 if (CHIP_IS_E1(bp)) {
1374 REG_WR(bp, BAR_USTRORM_INTMEM +
1375 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1376 U64_LO(fp->rx_comp_mapping));
1377 REG_WR(bp, BAR_USTRORM_INTMEM +
1378 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1379 U64_HI(fp->rx_comp_mapping));
1380 }
1381 }
1382}
1383
55c11941 1384static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1385{
6383c0b3 1386 u8 cos;
55c11941 1387 struct bnx2x *bp = fp->bp;
1388
1389 for_each_cos_in_tx_queue(fp, cos) {
1390 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1391 unsigned pkts_compl = 0, bytes_compl = 0;
1392
1393 u16 sw_prod = txdata->tx_pkt_prod;
1394 u16 sw_cons = txdata->tx_pkt_cons;
1395
1396 while (sw_cons != sw_prod) {
1397 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1398 &pkts_compl, &bytes_compl);
1399 sw_cons++;
9f6c9258 1400 }
1401
1402 netdev_tx_reset_queue(
1403 netdev_get_tx_queue(bp->dev,
1404 txdata->txq_index));
1405 }
1406}
1407
1408static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1409{
1410 int i;
1411
1412 for_each_tx_queue_cnic(bp, i) {
1413 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1414 }
1415}
1416
1417static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1418{
1419 int i;
1420
1421 for_each_eth_queue(bp, i) {
1422 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1423 }
1424}
1425
1426static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1427{
1428 struct bnx2x *bp = fp->bp;
1429 int i;
1430
1431 /* ring wasn't allocated */
1432 if (fp->rx_buf_ring == NULL)
1433 return;
1434
1435 for (i = 0; i < NUM_RX_BD; i++) {
1436 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1437 u8 *data = rx_buf->data;
1438
e52fcb24 1439 if (data == NULL)
b3b83c3f 1440 continue;
1441 dma_unmap_single(&bp->pdev->dev,
1442 dma_unmap_addr(rx_buf, mapping),
1443 fp->rx_buf_size, DMA_FROM_DEVICE);
1444
e52fcb24 1445 rx_buf->data = NULL;
d46d132c 1446 bnx2x_frag_free(fp, data);
1447 }
1448}
1449
1450static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1451{
1452 int j;
1453
1454 for_each_rx_queue_cnic(bp, j) {
1455 bnx2x_free_rx_bds(&bp->fp[j]);
1456 }
1457}
1458
1459static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1460{
b3b83c3f 1461 int j;
1462
55c11941 1463 for_each_eth_queue(bp, j) {
1464 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
b3b83c3f 1466 bnx2x_free_rx_bds(fp);
1467
9f6c9258 1468 if (!fp->disable_tpa)
dfacf138 1469 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1470 }
1471}
1472
1473void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1474{
1475 bnx2x_free_tx_skbs_cnic(bp);
1476 bnx2x_free_rx_skbs_cnic(bp);
1477}
1478
1479void bnx2x_free_skbs(struct bnx2x *bp)
1480{
1481 bnx2x_free_tx_skbs(bp);
1482 bnx2x_free_rx_skbs(bp);
1483}
1484
1485void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1486{
1487 /* load old values */
1488 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1489
1490 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1491 /* leave all but MAX value */
1492 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1493
1494 /* set new MAX value */
1495 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1496 & FUNC_MF_CFG_MAX_BW_MASK;
1497
1498 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1499 }
1500}
1501
1502/**
1503 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1504 *
1505 * @bp: driver handle
1506 * @nvecs: number of vectors to be released
1507 */
1508static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1509{
ca92429f 1510 int i, offset = 0;
1511
1512 if (nvecs == offset)
1513 return;
1514
1515 /* VFs don't have a default SB */
1516 if (IS_PF(bp)) {
1517 free_irq(bp->msix_table[offset].vector, bp->dev);
1518 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1519 bp->msix_table[offset].vector);
1520 offset++;
1521 }
1522
1523 if (CNIC_SUPPORT(bp)) {
1524 if (nvecs == offset)
1525 return;
1526 offset++;
1527 }
1528
ec6ba945 1529 for_each_eth_queue(bp, i) {
1530 if (nvecs == offset)
1531 return;
1532 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1533 i, bp->msix_table[offset].vector);
1534
ca92429f 1535 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1536 }
1537}
1538
d6214d7a 1539void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1540{
30a5de77 1541 if (bp->flags & USING_MSIX_FLAG &&
1542 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1543 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1544
1545 /* vfs don't have a default status block */
1546 if (IS_PF(bp))
1547 nvecs++;
1548
1549 bnx2x_free_msix_irqs(bp, nvecs);
1550 } else {
30a5de77 1551 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1552 }
1553}
1554
0e8d2ec5 1555int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1556{
1ab4434c 1557 int msix_vec = 0, i, rc;
1558
1559 /* VFs don't have a default status block */
1560 if (IS_PF(bp)) {
1561 bp->msix_table[msix_vec].entry = msix_vec;
1562 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1563 bp->msix_table[0].entry);
1564 msix_vec++;
1565 }
1566
1567 /* Cnic requires an msix vector for itself */
1568 if (CNIC_SUPPORT(bp)) {
1569 bp->msix_table[msix_vec].entry = msix_vec;
1570 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1571 msix_vec, bp->msix_table[msix_vec].entry);
1572 msix_vec++;
1573 }
1574
6383c0b3 1575 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1576 for_each_eth_queue(bp, i) {
d6214d7a 1577 bp->msix_table[msix_vec].entry = msix_vec;
1578 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1579 msix_vec, msix_vec, i);
d6214d7a 1580 msix_vec++;
1581 }
1582
1583 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1584 msix_vec);
1585
1ab4434c 1586 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1587
1588 /*
1589 * reconfigure number of tx/rx queues according to available
1590 * MSI-X vectors
1591 */
55c11941 1592 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1593 /* how many fewer vectors will we have? */
1ab4434c 1594 int diff = msix_vec - rc;
1595
51c1a580 1596 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1597
1598 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1599
1600 if (rc) {
1601 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1602 goto no_msix;
9f6c9258 1603 }
1604 /*
1605 * decrease number of queues by number of unallocated entries
1606 */
1607 bp->num_ethernet_queues -= diff;
1608 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1609
51c1a580 1610 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1611 bp->num_queues);
1612 } else if (rc > 0) {
1613 /* Get by with single vector */
1614 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1615 if (rc) {
1616 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1617 rc);
1618 goto no_msix;
1619 }
1620
1621 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1622 bp->flags |= USING_SINGLE_MSIX_FLAG;
1623
1624 BNX2X_DEV_INFO("set number of queues to 1\n");
1625 bp->num_ethernet_queues = 1;
1626 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1627 } else if (rc < 0) {
51c1a580 1628 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1629 goto no_msix;
1630 }
1631
1632 bp->flags |= USING_MSIX_FLAG;
1633
1634 return 0;
1635
1636no_msix:
1637 /* fall to INTx if not enough memory */
1638 if (rc == -ENOMEM)
1639 bp->flags |= DISABLE_MSI_FLAG;
1640
1641 return rc;
1642}
1643
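/* Summary note (illustrative) on the fallback ladder above: request one
 * vector per ETH queue plus slowpath (PF only) and CNIC; if fewer vectors but
 * at least BNX2X_MIN_MSIX_VEC_CNT are granted, retry with that count and
 * shrink the queue count; if fewer than the minimum are offered, fall back to
 * a single MSI-X vector with one queue; on failure the code goes to no_msix,
 * where -ENOMEM sets DISABLE_MSI_FLAG so the driver falls back to INTx.
 */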
1644static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1645{
ca92429f 1646 int i, rc, offset = 0;
1647
1648 /* no default status block for vf */
1649 if (IS_PF(bp)) {
1650 rc = request_irq(bp->msix_table[offset++].vector,
1651 bnx2x_msix_sp_int, 0,
1652 bp->dev->name, bp->dev);
1653 if (rc) {
1654 BNX2X_ERR("request sp irq failed\n");
1655 return -EBUSY;
1656 }
1657 }
1658
1659 if (CNIC_SUPPORT(bp))
1660 offset++;
1661
ec6ba945 1662 for_each_eth_queue(bp, i) {
1663 struct bnx2x_fastpath *fp = &bp->fp[i];
1664 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1665 bp->dev->name, i);
1666
d6214d7a 1667 rc = request_irq(bp->msix_table[offset].vector,
1668 bnx2x_msix_fp_int, 0, fp->name, fp);
1669 if (rc) {
1670 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1671 bp->msix_table[offset].vector, rc);
1672 bnx2x_free_msix_irqs(bp, offset);
1673 return -EBUSY;
1674 }
1675
d6214d7a 1676 offset++;
1677 }
1678
ec6ba945 1679 i = BNX2X_NUM_ETH_QUEUES(bp);
1680 if (IS_PF(bp)) {
1681 offset = 1 + CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1684 bp->msix_table[0].vector,
1685 0, bp->msix_table[offset].vector,
1686 i - 1, bp->msix_table[offset + i - 1].vector);
1687 } else {
1688 offset = CNIC_SUPPORT(bp);
1689 netdev_info(bp->dev,
1690 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1691 0, bp->msix_table[offset].vector,
1692 i - 1, bp->msix_table[offset + i - 1].vector);
1693 }
1694 return 0;
1695}
1696
d6214d7a 1697int bnx2x_enable_msi(struct bnx2x *bp)
1698{
1699 int rc;
1700
1701 rc = pci_enable_msi(bp->pdev);
1702 if (rc) {
51c1a580 1703 BNX2X_DEV_INFO("MSI is not attainable\n");
1704 return -1;
1705 }
1706 bp->flags |= USING_MSI_FLAG;
1707
1708 return 0;
1709}
1710
1711static int bnx2x_req_irq(struct bnx2x *bp)
1712{
1713 unsigned long flags;
30a5de77 1714 unsigned int irq;
1715
30a5de77 1716 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1717 flags = 0;
1718 else
1719 flags = IRQF_SHARED;
1720
1721 if (bp->flags & USING_MSIX_FLAG)
1722 irq = bp->msix_table[0].vector;
1723 else
1724 irq = bp->pdev->irq;
1725
1726 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1727}
1728
ecf01c22 1729int bnx2x_setup_irqs(struct bnx2x *bp)
1730{
1731 int rc = 0;
1732 if (bp->flags & USING_MSIX_FLAG &&
1733 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1734 rc = bnx2x_req_msix_irqs(bp);
1735 if (rc)
1736 return rc;
1737 } else {
1738 rc = bnx2x_req_irq(bp);
1739 if (rc) {
1740 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1741 return rc;
1742 }
1743 if (bp->flags & USING_MSI_FLAG) {
1744 bp->dev->irq = bp->pdev->irq;
1745 netdev_info(bp->dev, "using MSI IRQ %d\n",
1746 bp->dev->irq);
1747 }
1748 if (bp->flags & USING_MSIX_FLAG) {
1749 bp->dev->irq = bp->msix_table[0].vector;
1750 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1751 bp->dev->irq);
1752 }
1753 }
1754
1755 return 0;
1756}
1757
1758static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1759{
1760 int i;
1761
1762 for_each_rx_queue_cnic(bp, i)
1763 napi_enable(&bnx2x_fp(bp, i, napi));
1764}
1765
1191cb83 1766static void bnx2x_napi_enable(struct bnx2x *bp)
1767{
1768 int i;
1769
55c11941 1770 for_each_eth_queue(bp, i)
1771 napi_enable(&bnx2x_fp(bp, i, napi));
1772}
1773
55c11941
MS
1774static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1775{
1776 int i;
1777
1778 for_each_rx_queue_cnic(bp, i)
1779 napi_disable(&bnx2x_fp(bp, i, napi));
1780}
1781
1191cb83 1782static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1783{
1784 int i;
1785
55c11941 1786 for_each_eth_queue(bp, i)
9f6c9258
DK
1787 napi_disable(&bnx2x_fp(bp, i, napi));
1788}
1789
1790void bnx2x_netif_start(struct bnx2x *bp)
1791{
4b7ed897
DK
1792 if (netif_running(bp->dev)) {
1793 bnx2x_napi_enable(bp);
55c11941
MS
1794 if (CNIC_LOADED(bp))
1795 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1796 bnx2x_int_enable(bp);
1797 if (bp->state == BNX2X_STATE_OPEN)
1798 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1799 }
1800}
1801
1802void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1803{
1804 bnx2x_int_disable_sync(bp, disable_hw);
1805 bnx2x_napi_disable(bp);
55c11941
MS
1806 if (CNIC_LOADED(bp))
1807 bnx2x_napi_disable_cnic(bp);
9f6c9258 1808}
9f6c9258 1809
8307fa3e
VZ
1810u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1811{
8307fa3e 1812 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1813
55c11941 1814 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1815 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1816 u16 ether_type = ntohs(hdr->h_proto);
1817
1818 /* Skip VLAN tag if present */
1819 if (ether_type == ETH_P_8021Q) {
1820 struct vlan_ethhdr *vhdr =
1821 (struct vlan_ethhdr *)skb->data;
1822
1823 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1824 }
1825
1826 /* If ethertype is FCoE or FIP - use FCoE ring */
1827 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1828 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1829 }
55c11941 1830
cdb9d6ae 1831 /* select a non-FCoE queue */
6383c0b3 1832 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1833}
1834
d6214d7a
DK
1835void bnx2x_set_num_queues(struct bnx2x *bp)
1836{
96305234 1837 /* RSS queues */
55c11941 1838 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1839
a3348722
BW
1840 /* override in STORAGE SD modes */
1841 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1842 bp->num_ethernet_queues = 1;
1843
ec6ba945 1844 /* Add special queues */
55c11941
MS
1845 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1846 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1847
1848 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1849}
1850
cdb9d6ae
VZ
1851/**
1852 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1853 *
1854 * @bp: Driver handle
1855 *
1856 * We currently support at most 16 Tx queues for each CoS, thus we will
1857 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1858 * bp->max_cos.
1859 *
1860 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1861 * index after all ETH L2 indices.
1862 *
1863 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1864 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1865 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1866 *
1867 * The proper configuration of skb->queue_mapping is handled by
1868 * bnx2x_select_queue() and __skb_tx_hash().
1869 *
1870 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1871 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1872 */
55c11941 1873static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1874{
6383c0b3 1875 int rc, tx, rx;
ec6ba945 1876
65565884 1877 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1878 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1879
6383c0b3 1880/* account for fcoe queue */
55c11941
MS
1881 if (include_cnic && !NO_FCOE(bp)) {
1882 rx++;
1883 tx++;
6383c0b3 1884 }
6383c0b3
AE
1885
1886 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1887 if (rc) {
1888 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1889 return rc;
1890 }
1891 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1892 if (rc) {
1893 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1894 return rc;
1895 }
1896
51c1a580 1897 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1898 tx, rx);
1899
ec6ba945
VZ
1900 return rc;
1901}
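/* Worked example with hypothetical numbers: 4 ETH queues, max_cos = 3 and
 * an FCoE L2 queue present give
 *   tx = 4 * 3 + 1 = 13 and rx = 4 + 1 = 5,
 * which are the values passed to netif_set_real_num_tx_queues() and
 * netif_set_real_num_rx_queues() above.
 */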
1902
1191cb83 1903static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1904{
1905 int i;
1906
1907 for_each_queue(bp, i) {
1908 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1909 u32 mtu;
a8c94b91
VZ
1910
1911 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1912 if (IS_FCOE_IDX(i))
1913 /*
1914 * Although there are no IP frames expected to arrive on
1915 * this ring, we still want to add an
1916 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1917 * overrun attack.
1918 */
e52fcb24 1919 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1920 else
e52fcb24
ED
1921 mtu = bp->dev->mtu;
1922 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1923 IP_HEADER_ALIGNMENT_PADDING +
1924 ETH_OVREHEAD +
1925 mtu +
1926 BNX2X_FW_RX_ALIGN_END;
1927 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1928 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1929 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1930 else
1931 fp->rx_frag_size = 0;
a8c94b91
VZ
1932 }
1933}
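/* Sizing sketch (symbolic; the alignment constants are chip/firmware
 * specific):
 *   rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *                 ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END
 * With a standard 1500-byte MTU the total plus NET_SKB_PAD is typically
 * well below a 4K page, so rx_frag_size is set and the Rx path can use
 * the page-fragment allocator; larger (jumbo) MTUs fall back to
 * rx_frag_size = 0.
 */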
1934
1191cb83 1935static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1936{
1937 int i;
619c5cb6
VZ
1938 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1939
1940 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1941 * enabled
1942 */
5d317c6a
MS
1943 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1944 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1945 bp->fp->cl_id +
1946 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1947
1948 /*
1949 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1950 * per-port, so if explicit configuration is needed, do it only
1951 * for a PMF.
1952 *
1953 * For 57712 and newer on the other hand it's a per-function
1954 * configuration.
1955 */
5d317c6a 1956 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1957}
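/* Indirection table example with hypothetical values (4 ETH queues,
 * fp->cl_id == 0): ethtool_rxfh_indir_default(i, 4) is i % 4, so the
 * table is filled round-robin as 0, 1, 2, 3, 0, 1, 2, 3, ... across all
 * of its entries, spreading Rx flows evenly over the ETH queues.
 */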
1958
96305234 1959int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1960 bool config_hash)
619c5cb6 1961{
3b603066 1962 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1963
1964 /* Although RSS is meaningless when there is a single HW queue we
1965 * still need it enabled in order to have HW Rx hash generated.
1966 *
1967 * if (!is_eth_multi(bp))
1968 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1969 */
1970
96305234 1971 params.rss_obj = rss_obj;
619c5cb6
VZ
1972
1973 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1974
96305234 1975 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1976
96305234
DK
1977 /* RSS configuration */
1978 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1979 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1980 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1981 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1982 if (rss_obj->udp_rss_v4)
1983 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1984 if (rss_obj->udp_rss_v6)
1985 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1986
96305234
DK
1987 /* Hash bits */
1988 params.rss_result_mask = MULTI_MASK;
619c5cb6 1989
5d317c6a 1990 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1991
96305234
DK
1992 if (config_hash) {
1993 /* RSS keys */
8376d0bc 1994 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1995 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1996 }
1997
1998 return bnx2x_config_rss(bp, &params);
1999}
2000
1191cb83 2001static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2002{
3b603066 2003 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2004
2005 /* Prepare parameters for function state transitions */
2006 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2007
2008 func_params.f_obj = &bp->func_obj;
2009 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2010
2011 func_params.params.hw_init.load_phase = load_code;
2012
2013 return bnx2x_func_state_change(bp, &func_params);
2014}
2015
2016/*
2017 * Cleans the objects that have internal lists without sending
2018 * ramrods. Should be run when interrupts are disabled.
2019 */
7fa6f340 2020void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2021{
2022 int rc;
2023 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2024 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2025 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2026
2027 /***************** Cleanup MACs' object first *************************/
2028
2029 /* Wait for completion of the requested commands */
2030 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2031 /* Perform a dry cleanup */
2032 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2033
2034 /* Clean ETH primary MAC */
2035 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2036 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2037 &ramrod_flags);
2038 if (rc != 0)
2039 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2040
2041 /* Cleanup UC list */
2042 vlan_mac_flags = 0;
2043 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2044 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2045 &ramrod_flags);
2046 if (rc != 0)
2047 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2048
2049 /***************** Now clean mcast object *****************************/
2050 rparam.mcast_obj = &bp->mcast_obj;
2051 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2052
2053 /* Add a DEL command... */
2054 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2055 if (rc < 0)
51c1a580
MS
2056 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2057 rc);
619c5cb6
VZ
2058
2059 /* ...and wait until all pending commands are cleared */
2060 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2061 while (rc != 0) {
2062 if (rc < 0) {
2063 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2064 rc);
2065 return;
2066 }
2067
2068 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2069 }
2070}
2071
2072#ifndef BNX2X_STOP_ON_ERROR
2073#define LOAD_ERROR_EXIT(bp, label) \
2074 do { \
2075 (bp)->state = BNX2X_STATE_ERROR; \
2076 goto label; \
2077 } while (0)
55c11941
MS
2078
2079#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2080 do { \
2081 bp->cnic_loaded = false; \
2082 goto label; \
2083 } while (0)
2084#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2085#define LOAD_ERROR_EXIT(bp, label) \
2086 do { \
2087 (bp)->state = BNX2X_STATE_ERROR; \
2088 (bp)->panic = 1; \
2089 return -EBUSY; \
2090 } while (0)
55c11941
MS
2091#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2092 do { \
2093 bp->cnic_loaded = false; \
2094 (bp)->panic = 1; \
2095 return -EBUSY; \
2096 } while (0)
2097#endif /*BNX2X_STOP_ON_ERROR*/
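/* Typical usage below (shown here only as a reminder of the pattern):
 *
 *	if (bnx2x_alloc_fw_stats_mem(bp))
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *
 * In the normal build this records BNX2X_STATE_ERROR and jumps to the
 * matching cleanup label; with BNX2X_STOP_ON_ERROR it also sets
 * bp->panic and returns -EBUSY immediately.
 */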
619c5cb6 2098
ad5afc89
AE
2099static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2100{
2101 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2102 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2103 return;
2104}
2105
2106static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2107{
8db573ba 2108 int num_groups, vf_headroom = 0;
ad5afc89 2109 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2110
ad5afc89
AE
2111 /* number of queues for statistics is number of eth queues + FCoE */
2112 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2113
ad5afc89
AE
2114 /* Total number of FW statistics requests =
2115 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2116 * and fcoe l2 queue) stats + num of queues (which includes another 1
2117 * for fcoe l2 queue if applicable)
2118 */
2119 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2120
8db573ba
AE
2121 /* vf stats appear in the request list, but their data is allocated by
2122 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2123 * it is used to determine where to place the vf stats queries in the
2124 * request struct
2125 */
2126 if (IS_SRIOV(bp))
6411280a 2127 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2128
ad5afc89
AE
2129 /* Request is built from stats_query_header and an array of
2130 * stats_query_cmd_group, each of which contains
2131 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2132 * configured in the stats_query_header.
2133 */
2134 num_groups =
8db573ba
AE
2135 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2136 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2137 1 : 0));
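	/* The expression above is a ceiling division, i.e.
	 * num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
	 *                           STATS_QUERY_CMD_COUNT).
	 * For example, with hypothetical values of 18 requests and 16
	 * commands per group this yields 2 groups.
	 */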
2138
8db573ba
AE
2139 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2140 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2141 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2142 num_groups * sizeof(struct stats_query_cmd_group);
2143
2144 /* Data for statistics requests + stats_counter
2145 * stats_counter holds per-STORM counters that are incremented
2146 * when STORM has finished with the current request.
2147 * Memory for FCoE offloaded statistics is counted anyway,
2148 * even if they will not be sent.
2149 * VF stats are not accounted for here as the data of VF stats is stored
2150 * in memory allocated by the VF, not here.
2151 */
2152 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2153 sizeof(struct per_pf_stats) +
2154 sizeof(struct fcoe_statistics_params) +
2155 sizeof(struct per_queue_stats) * num_queue_stats +
2156 sizeof(struct stats_counter);
2157
2158 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2159 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2160
2161 /* Set shortcuts */
2162 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2163 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2164 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2165 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2166 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2167 bp->fw_stats_req_sz;
2168
2169 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2170 U64_HI(bp->fw_stats_req_mapping),
2171 U64_LO(bp->fw_stats_req_mapping));
2172 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2173 U64_HI(bp->fw_stats_data_mapping),
2174 U64_LO(bp->fw_stats_data_mapping));
2175 return 0;
2176
2177alloc_mem_err:
2178 bnx2x_free_fw_stats_mem(bp);
2179 BNX2X_ERR("Can't allocate FW stats memory\n");
2180 return -ENOMEM;
2181}
2182
2183/* send load request to mcp and analyze response */
2184static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2185{
2186 /* init fw_seq */
2187 bp->fw_seq =
2188 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2189 DRV_MSG_SEQ_NUMBER_MASK);
2190 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2191
2192 /* Get current FW pulse sequence */
2193 bp->fw_drv_pulse_wr_seq =
2194 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2195 DRV_PULSE_SEQ_MASK);
2196 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2197
2198 /* load request */
2199 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2200 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2201
2202 /* if mcp fails to respond we must abort */
2203 if (!(*load_code)) {
2204 BNX2X_ERR("MCP response failure, aborting\n");
2205 return -EBUSY;
2206 }
2207
2208 /* If mcp refused (e.g. other port is in diagnostic mode) we
2209 * must abort
2210 */
2211 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2212 BNX2X_ERR("MCP refused load request, aborting\n");
2213 return -EBUSY;
2214 }
2215 return 0;
2216}
2217
2218/* check whether another PF has already loaded FW to chip. In
2219 * virtualized environments a pf from another VM may have already
2220 * initialized the device including loading FW
2221 */
2222int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2223{
2224 /* is another pf loaded on this engine? */
2225 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2226 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2227 /* build my FW version dword */
2228 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2229 (BCM_5710_FW_MINOR_VERSION << 8) +
2230 (BCM_5710_FW_REVISION_VERSION << 16) +
2231 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2232
2233 /* read loaded FW from chip */
2234 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2235
2236 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2237 loaded_fw, my_fw);
2238
2239 /* abort nic load if version mismatch */
2240 if (my_fw != loaded_fw) {
2241 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2242 loaded_fw, my_fw);
ad5afc89
AE
2243 return -EBUSY;
2244 }
2245 }
2246 return 0;
2247}
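/* Version dword layout sketch: byte 0 is the major, byte 1 the minor,
 * byte 2 the revision and byte 3 the engineering version. A hypothetical
 * FW 7.8.19.0 would encode as 0x00130807; any PF already running a
 * different dword triggers the -EBUSY abort above.
 */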
2248
2249/* returns the "mcp load_code" according to global load_count array */
2250static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2251{
2252 int path = BP_PATH(bp);
2253
2254 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2255 path, load_count[path][0], load_count[path][1],
2256 load_count[path][2]);
2257 load_count[path][0]++;
2258 load_count[path][1 + port]++;
2259 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2260 path, load_count[path][0], load_count[path][1],
2261 load_count[path][2]);
2262 if (load_count[path][0] == 1)
2263 return FW_MSG_CODE_DRV_LOAD_COMMON;
2264 else if (load_count[path][1 + port] == 1)
2265 return FW_MSG_CODE_DRV_LOAD_PORT;
2266 else
2267 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2268}
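/* Example of the no-MCP bookkeeping above: the first function to load on
 * a path sees load_count[path][0] == 1 and gets LOAD_COMMON, the first
 * function on a given port gets LOAD_PORT, and every later function on
 * that port gets LOAD_FUNCTION.
 */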
2269
2270/* mark PMF if applicable */
2271static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2272{
2273 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2274 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2275 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2276 bp->port.pmf = 1;
2277 /* We need the barrier to ensure the ordering between the
2278 * writing to bp->port.pmf here and reading it from the
2279 * bnx2x_periodic_task().
2280 */
2281 smp_mb();
2282 } else {
2283 bp->port.pmf = 0;
452427b0
YM
2284 }
2285
ad5afc89
AE
2286 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2287}
2288
2289static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2290{
2291 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2292 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2293 (bp->common.shmem2_base)) {
2294 if (SHMEM2_HAS(bp, dcc_support))
2295 SHMEM2_WR(bp, dcc_support,
2296 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2297 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2298 if (SHMEM2_HAS(bp, afex_driver_support))
2299 SHMEM2_WR(bp, afex_driver_support,
2300 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2301 }
2302
2303 /* Set AFEX default VLAN tag to an invalid value */
2304 bp->afex_def_vlan_tag = -1;
452427b0
YM
2305}
2306
1191cb83
ED
2307/**
2308 * bnx2x_bz_fp - zero content of the fastpath structure.
2309 *
2310 * @bp: driver handle
2311 * @index: fastpath index to be zeroed
2312 *
2313 * Makes sure the contents of bp->fp[index].napi are kept
2314 * intact.
2315 */
2316static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2317{
2318 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c 2319
65565884 2320 int cos;
1191cb83 2321 struct napi_struct orig_napi = fp->napi;
15192a8c 2322 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2323 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2324 if (fp->tpa_info)
2325 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2326 sizeof(struct bnx2x_agg_info));
2327 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2328
2329 /* Restore the NAPI object as it has been already initialized */
2330 fp->napi = orig_napi;
15192a8c 2331 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2332 fp->bp = bp;
2333 fp->index = index;
2334 if (IS_ETH_FP(fp))
2335 fp->max_cos = bp->max_cos;
2336 else
2337 /* Special queues support only one CoS */
2338 fp->max_cos = 1;
2339
65565884 2340 /* Init txdata pointers */
65565884
MS
2341 if (IS_FCOE_FP(fp))
2342 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2343 if (IS_ETH_FP(fp))
2344 for_each_cos_in_tx_queue(fp, cos)
2345 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2346 BNX2X_NUM_ETH_QUEUES(bp) + index];
2347
1191cb83
ED
2348 /*
2349 * Set the TPA flag for each queue. The TPA flag determines the queue's
2350 * minimal size, so it must be set prior to queue memory allocation
2351 */
2352 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2353 (bp->flags & GRO_ENABLE_FLAG &&
2354 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2355 if (bp->flags & TPA_ENABLE_FLAG)
2356 fp->mode = TPA_MODE_LRO;
2357 else if (bp->flags & GRO_ENABLE_FLAG)
2358 fp->mode = TPA_MODE_GRO;
2359
1191cb83
ED
2360 /* We don't want TPA on an FCoE L2 ring */
2361 if (IS_FCOE_FP(fp))
2362 fp->disable_tpa = 1;
55c11941
MS
2363}
2364
2365int bnx2x_load_cnic(struct bnx2x *bp)
2366{
2367 int i, rc, port = BP_PORT(bp);
2368
2369 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2370
2371 mutex_init(&bp->cnic_mutex);
2372
ad5afc89
AE
2373 if (IS_PF(bp)) {
2374 rc = bnx2x_alloc_mem_cnic(bp);
2375 if (rc) {
2376 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2377 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2378 }
55c11941
MS
2379 }
2380
2381 rc = bnx2x_alloc_fp_mem_cnic(bp);
2382 if (rc) {
2383 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2384 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2385 }
2386
2387 /* Update the number of queues with the cnic queues */
2388 rc = bnx2x_set_real_num_queues(bp, 1);
2389 if (rc) {
2390 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2391 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2392 }
2393
2394 /* Add all CNIC NAPI objects */
2395 bnx2x_add_all_napi_cnic(bp);
2396 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2397 bnx2x_napi_enable_cnic(bp);
2398
2399 rc = bnx2x_init_hw_func_cnic(bp);
2400 if (rc)
2401 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2402
2403 bnx2x_nic_init_cnic(bp);
2404
ad5afc89
AE
2405 if (IS_PF(bp)) {
2406 /* Enable Timer scan */
2407 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2408
2409 /* setup cnic queues */
2410 for_each_cnic_queue(bp, i) {
2411 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2412 if (rc) {
2413 BNX2X_ERR("Queue setup failed\n");
2414 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2415 }
55c11941
MS
2416 }
2417 }
2418
2419 /* Initialize Rx filter. */
2420 netif_addr_lock_bh(bp->dev);
2421 bnx2x_set_rx_mode(bp->dev);
2422 netif_addr_unlock_bh(bp->dev);
2423
2424 /* re-read iscsi info */
2425 bnx2x_get_iscsi_info(bp);
2426 bnx2x_setup_cnic_irq_info(bp);
2427 bnx2x_setup_cnic_info(bp);
2428 bp->cnic_loaded = true;
2429 if (bp->state == BNX2X_STATE_OPEN)
2430 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2431
2432
2433 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2434
2435 return 0;
2436
2437#ifndef BNX2X_STOP_ON_ERROR
2438load_error_cnic2:
2439 /* Disable Timer scan */
2440 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2441
2442load_error_cnic1:
2443 bnx2x_napi_disable_cnic(bp);
2444 /* Update the number of queues without the cnic queues */
2445 rc = bnx2x_set_real_num_queues(bp, 0);
2446 if (rc)
2447 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2448load_error_cnic0:
2449 BNX2X_ERR("CNIC-related load failed\n");
2450 bnx2x_free_fp_mem_cnic(bp);
2451 bnx2x_free_mem_cnic(bp);
2452 return rc;
2453#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2454}
2455
9f6c9258
DK
2456/* must be called with rtnl_lock */
2457int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2458{
619c5cb6 2459 int port = BP_PORT(bp);
ad5afc89 2460 int i, rc = 0, load_code = 0;
9f6c9258 2461
55c11941
MS
2462 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2463 DP(NETIF_MSG_IFUP,
2464 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2465
9f6c9258 2466#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2467 if (unlikely(bp->panic)) {
2468 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2469 return -EPERM;
51c1a580 2470 }
9f6c9258
DK
2471#endif
2472
2473 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2474
2ae17f66
VZ
2475 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2476 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2477 &bp->last_reported_link.link_report_flags);
2ae17f66 2478
ad5afc89
AE
2479 if (IS_PF(bp))
2480 /* must be called before memory allocation and HW init */
2481 bnx2x_ilt_set_info(bp);
523224a3 2482
6383c0b3
AE
2483 /*
2484 * Zero fastpath structures preserving invariants like napi, which are
2485 * allocated only once, fp index, max_cos, bp pointer.
65565884 2486 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2487 */
51c1a580 2488 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2489 for_each_queue(bp, i)
2490 bnx2x_bz_fp(bp, i);
55c11941
MS
2491 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2492 bp->num_cnic_queues) *
2493 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2494
55c11941 2495 bp->fcoe_init = false;
6383c0b3 2496
a8c94b91
VZ
2497 /* Set the receive queues buffer size */
2498 bnx2x_set_rx_buf_size(bp);
2499
ad5afc89
AE
2500 if (IS_PF(bp)) {
2501 rc = bnx2x_alloc_mem(bp);
2502 if (rc) {
2503 BNX2X_ERR("Unable to allocate bp memory\n");
2504 return rc;
2505 }
2506 }
2507
2508 /* Allocated memory for FW statistics */
2509 if (bnx2x_alloc_fw_stats_mem(bp))
2510 LOAD_ERROR_EXIT(bp, load_error0);
2511
2512 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2513 * of memory available for RSS queues
2514 */
2515 rc = bnx2x_alloc_fp_mem(bp);
2516 if (rc) {
2517 BNX2X_ERR("Unable to allocate memory for fps\n");
2518 LOAD_ERROR_EXIT(bp, load_error0);
2519 }
d6214d7a 2520
8d9ac297
AE
2521 /* request pf to initialize status blocks */
2522 if (IS_VF(bp)) {
2523 rc = bnx2x_vfpf_init(bp);
2524 if (rc)
2525 LOAD_ERROR_EXIT(bp, load_error0);
2526 }
2527
b3b83c3f
DK
2528 /* As long as bnx2x_alloc_mem() may possibly update
2529 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2530 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2531 */
55c11941 2532 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2533 if (rc) {
ec6ba945 2534 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2535 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2536 }
2537
6383c0b3
AE
2538 /* configure multi cos mappings in kernel.
2539 * This configuration may be overridden by a multi-class queue discipline
2540 * or by a dcbx negotiation result.
2541 */
2542 bnx2x_setup_tc(bp->dev, bp->max_cos);
2543
26614ba5
MS
2544 /* Add all NAPI objects */
2545 bnx2x_add_all_napi(bp);
55c11941 2546 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2547 bnx2x_napi_enable(bp);
2548
ad5afc89
AE
2549 if (IS_PF(bp)) {
2550 /* set pf load just before approaching the MCP */
2551 bnx2x_set_pf_load(bp);
2552
2553 /* if mcp exists send load request and analyze response */
2554 if (!BP_NOMCP(bp)) {
2555 /* attempt to load pf */
2556 rc = bnx2x_nic_load_request(bp, &load_code);
2557 if (rc)
2558 LOAD_ERROR_EXIT(bp, load_error1);
2559
2560 /* what did mcp say? */
2561 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2562 if (rc) {
2563 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2564 LOAD_ERROR_EXIT(bp, load_error2);
2565 }
ad5afc89
AE
2566 } else {
2567 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2568 }
9f6c9258 2569
ad5afc89
AE
2570 /* mark pmf if applicable */
2571 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2572
ad5afc89
AE
2573 /* Init Function state controlling object */
2574 bnx2x__init_func_obj(bp);
6383c0b3 2575
ad5afc89
AE
2576 /* Initialize HW */
2577 rc = bnx2x_init_hw(bp, load_code);
2578 if (rc) {
2579 BNX2X_ERR("HW init failed, aborting\n");
2580 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2581 LOAD_ERROR_EXIT(bp, load_error2);
2582 }
9f6c9258
DK
2583 }
2584
ecf01c22
YM
2585 bnx2x_pre_irq_nic_init(bp);
2586
d6214d7a
DK
2587 /* Connect to IRQs */
2588 rc = bnx2x_setup_irqs(bp);
523224a3 2589 if (rc) {
ad5afc89
AE
2590 BNX2X_ERR("setup irqs failed\n");
2591 if (IS_PF(bp))
2592 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2593 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2594 }
2595
619c5cb6 2596 /* Init per-function objects */
ad5afc89 2597 if (IS_PF(bp)) {
ecf01c22
YM
2598 /* Setup NIC internals and enable interrupts */
2599 bnx2x_post_irq_nic_init(bp, load_code);
2600
ad5afc89 2601 bnx2x_init_bp_objs(bp);
b56e9670 2602 bnx2x_iov_nic_init(bp);
a3348722 2603
ad5afc89
AE
2604 /* Set AFEX default VLAN tag to an invalid value */
2605 bp->afex_def_vlan_tag = -1;
2606 bnx2x_nic_load_afex_dcc(bp, load_code);
2607 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2608 rc = bnx2x_func_start(bp);
2609 if (rc) {
2610 BNX2X_ERR("Function start failed!\n");
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2612
619c5cb6 2613 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2614 }
9f6c9258 2615
ad5afc89
AE
2616 /* Send LOAD_DONE command to MCP */
2617 if (!BP_NOMCP(bp)) {
2618 load_code = bnx2x_fw_command(bp,
2619 DRV_MSG_CODE_LOAD_DONE, 0);
2620 if (!load_code) {
2621 BNX2X_ERR("MCP response failure, aborting\n");
2622 rc = -EBUSY;
2623 LOAD_ERROR_EXIT(bp, load_error3);
2624 }
2625 }
9f6c9258 2626
0c14e5ce
AE
2627 /* initialize FW coalescing state machines in RAM */
2628 bnx2x_update_coalesce(bp);
2629
ad5afc89
AE
2630 /* setup the leading queue */
2631 rc = bnx2x_setup_leading(bp);
51c1a580 2632 if (rc) {
ad5afc89 2633 BNX2X_ERR("Setup leading failed!\n");
55c11941 2634 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2635 }
523224a3 2636
ad5afc89
AE
2637 /* set up the rest of the queues */
2638 for_each_nondefault_eth_queue(bp, i) {
2639 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2640 if (rc) {
2641 BNX2X_ERR("Queue setup failed\n");
2642 LOAD_ERROR_EXIT(bp, load_error3);
2643 }
2644 }
2645
2646 /* setup rss */
2647 rc = bnx2x_init_rss_pf(bp);
2648 if (rc) {
2649 BNX2X_ERR("PF RSS init failed\n");
2650 LOAD_ERROR_EXIT(bp, load_error3);
2651 }
8d9ac297
AE
2652
2653 } else { /* vf */
2654 for_each_eth_queue(bp, i) {
2655 rc = bnx2x_vfpf_setup_q(bp, i);
2656 if (rc) {
2657 BNX2X_ERR("Queue setup failed\n");
2658 LOAD_ERROR_EXIT(bp, load_error3);
2659 }
2660 }
51c1a580 2661 }
619c5cb6 2662
523224a3
DK
2663 /* Now that clients are configured we are ready to work */
2664 bp->state = BNX2X_STATE_OPEN;
2665
619c5cb6 2666 /* Configure a ucast MAC */
ad5afc89
AE
2667 if (IS_PF(bp))
2668 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2669 else /* vf */
f8f4f61a
DK
2670 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2671 true);
51c1a580
MS
2672 if (rc) {
2673 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2674 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2675 }
6e30dd4e 2676
ad5afc89 2677 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2678 bnx2x_update_max_mf_config(bp, bp->pending_max);
2679 bp->pending_max = 0;
2680 }
2681
ad5afc89
AE
2682 if (bp->port.pmf) {
2683 rc = bnx2x_initial_phy_init(bp, load_mode);
2684 if (rc)
2685 LOAD_ERROR_EXIT(bp, load_error3);
2686 }
c63da990 2687 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2688
619c5cb6
VZ
2689 /* Start fast path */
2690
2691 /* Initialize Rx filter. */
2692 netif_addr_lock_bh(bp->dev);
6e30dd4e 2693 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2694 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2695
619c5cb6 2696 /* Start the Tx */
9f6c9258
DK
2697 switch (load_mode) {
2698 case LOAD_NORMAL:
523224a3
DK
2699 /* Tx queues should only be re-enabled */
2700 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2701 break;
2702
2703 case LOAD_OPEN:
2704 netif_tx_start_all_queues(bp->dev);
523224a3 2705 smp_mb__after_clear_bit();
9f6c9258
DK
2706 break;
2707
2708 case LOAD_DIAG:
8970b2e4 2709 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2710 bp->state = BNX2X_STATE_DIAG;
2711 break;
2712
2713 default:
2714 break;
2715 }
2716
00253a8c 2717 if (bp->port.pmf)
4c704899 2718 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2719 else
9f6c9258
DK
2720 bnx2x__link_status_update(bp);
2721
2722 /* start the timer */
2723 mod_timer(&bp->timer, jiffies + bp->current_interval);
2724
55c11941
MS
2725 if (CNIC_ENABLED(bp))
2726 bnx2x_load_cnic(bp);
9f6c9258 2727
ad5afc89
AE
2728 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2729 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2730 u32 val;
2731 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2732 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2733 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2734 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2735 }
2736
619c5cb6 2737 /* Wait for all pending SP commands to complete */
ad5afc89 2738 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2739 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2740 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2741 return -EBUSY;
2742 }
6891dd25 2743
9876879f
BW
2744 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2745 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2746 bnx2x_dcbx_init(bp, false);
2747
55c11941
MS
2748 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2749
9f6c9258
DK
2750 return 0;
2751
619c5cb6 2752#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2753load_error3:
ad5afc89
AE
2754 if (IS_PF(bp)) {
2755 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2756
ad5afc89
AE
2757 /* Clean queueable objects */
2758 bnx2x_squeeze_objects(bp);
2759 }
619c5cb6 2760
9f6c9258
DK
2761 /* Free SKBs, SGEs, TPA pool and driver internals */
2762 bnx2x_free_skbs(bp);
ec6ba945 2763 for_each_rx_queue(bp, i)
9f6c9258 2764 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2765
9f6c9258 2766 /* Release IRQs */
d6214d7a
DK
2767 bnx2x_free_irq(bp);
2768load_error2:
ad5afc89 2769 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2770 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2771 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2772 }
2773
2774 bp->port.pmf = 0;
9f6c9258
DK
2775load_error1:
2776 bnx2x_napi_disable(bp);
722c6f58 2777 bnx2x_del_all_napi(bp);
ad5afc89 2778
889b9af3 2779 /* clear pf_load status, as it was already set */
ad5afc89
AE
2780 if (IS_PF(bp))
2781 bnx2x_clear_pf_load(bp);
d6214d7a 2782load_error0:
ad5afc89
AE
2783 bnx2x_free_fp_mem(bp);
2784 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2785 bnx2x_free_mem(bp);
2786
2787 return rc;
619c5cb6 2788#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2789}
2790
7fa6f340 2791int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2792{
2793 u8 rc = 0, cos, i;
2794
2795 /* Wait until tx fastpath tasks complete */
2796 for_each_tx_queue(bp, i) {
2797 struct bnx2x_fastpath *fp = &bp->fp[i];
2798
2799 for_each_cos_in_tx_queue(fp, cos)
2800 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2801 if (rc)
2802 return rc;
2803 }
2804 return 0;
2805}
2806
9f6c9258 2807/* must be called with rtnl_lock */
5d07d868 2808int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2809{
2810 int i;
c9ee9206
VZ
2811 bool global = false;
2812
55c11941
MS
2813 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2814
9ce392d4 2815 /* mark driver is unloaded in shmem2 */
ad5afc89 2816 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2817 u32 val;
2818 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2819 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2820 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2821 }
2822
80bfe5cc 2823 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2824 (bp->state == BNX2X_STATE_CLOSED ||
2825 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2826 /* We can get here if the driver has been unloaded
2827 * during parity error recovery and is either waiting for a
2828 * leader to complete or for other functions to unload and
2829 * then ifdown has been issued. In this case we want to
2830 * unload and let other functions complete a recovery
2831 * process.
2832 */
9f6c9258
DK
2833 bp->recovery_state = BNX2X_RECOVERY_DONE;
2834 bp->is_leader = 0;
c9ee9206
VZ
2835 bnx2x_release_leader_lock(bp);
2836 smp_mb();
2837
51c1a580
MS
2838 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2839 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2840 return -EINVAL;
2841 }
2842
80bfe5cc
YM
2843 /* Nothing to do during unload if previous bnx2x_nic_load()
2844 * has not completed successfully - all resources are released.
2845 *
2846 * We can get here only after an unsuccessful ndo_* callback, during which
2847 * dev->IFF_UP flag is still on.
2848 */
2849 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2850 return 0;
2851
2852 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2853 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2854 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2855 */
2856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2857 smp_mb();
2858
55c11941
MS
2859 if (CNIC_LOADED(bp))
2860 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2861
9505ee37
VZ
2862 /* Stop Tx */
2863 bnx2x_tx_disable(bp);
65565884 2864 netdev_reset_tc(bp->dev);
9505ee37 2865
9f6c9258 2866 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2867
9f6c9258 2868 del_timer_sync(&bp->timer);
f85582f8 2869
ad5afc89
AE
2870 if (IS_PF(bp)) {
2871 /* Set ALWAYS_ALIVE bit in shmem */
2872 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2873 bnx2x_drv_pulse(bp);
2874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2875 bnx2x_save_statistics(bp);
2876 }
9f6c9258 2877
ad5afc89
AE
2878 /* wait till consumers catch up with producers in all queues */
2879 bnx2x_drain_tx_queues(bp);
9f6c9258 2880
9b176b6b
AE
2881 /* if VF, indicate to PF that this function is going down (PF will delete sp
2882 * elements and clear initializations)
2883 */
2884 if (IS_VF(bp))
2885 bnx2x_vfpf_close_vf(bp);
2886 else if (unload_mode != UNLOAD_RECOVERY)
2887 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2888 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2889 else {
c9ee9206
VZ
2890 /* Send the UNLOAD_REQUEST to the MCP */
2891 bnx2x_send_unload_req(bp, unload_mode);
2892
2893 /*
2894 * Prevent transactions to host from the functions on the
2895 * engine that doesn't reset global blocks in case of global
2896 * attention once global blocks are reset and gates are opened
2897 * (the engine whose leader will perform the recovery
2898 * last).
2899 */
2900 if (!CHIP_IS_E1x(bp))
2901 bnx2x_pf_disable(bp);
2902
2903 /* Disable HW interrupts, NAPI */
523224a3 2904 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2905 /* Delete all NAPI objects */
2906 bnx2x_del_all_napi(bp);
55c11941
MS
2907 if (CNIC_LOADED(bp))
2908 bnx2x_del_all_napi_cnic(bp);
523224a3 2909 /* Release IRQs */
d6214d7a 2910 bnx2x_free_irq(bp);
c9ee9206
VZ
2911
2912 /* Report UNLOAD_DONE to MCP */
5d07d868 2913 bnx2x_send_unload_done(bp, false);
523224a3 2914 }
9f6c9258 2915
619c5cb6
VZ
2916 /*
2917 * At this stage no more interrupts will arrive, so we may safely clean
2918 * the queueable objects here in case they failed to get cleaned so far.
2919 */
ad5afc89
AE
2920 if (IS_PF(bp))
2921 bnx2x_squeeze_objects(bp);
619c5cb6 2922
79616895
VZ
2923 /* There should be no more pending SP commands at this stage */
2924 bp->sp_state = 0;
2925
9f6c9258
DK
2926 bp->port.pmf = 0;
2927
2928 /* Free SKBs, SGEs, TPA pool and driver internals */
2929 bnx2x_free_skbs(bp);
55c11941
MS
2930 if (CNIC_LOADED(bp))
2931 bnx2x_free_skbs_cnic(bp);
ec6ba945 2932 for_each_rx_queue(bp, i)
9f6c9258 2933 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2934
ad5afc89
AE
2935 bnx2x_free_fp_mem(bp);
2936 if (CNIC_LOADED(bp))
55c11941 2937 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2938
ad5afc89 2939 if (IS_PF(bp)) {
ad5afc89
AE
2940 if (CNIC_LOADED(bp))
2941 bnx2x_free_mem_cnic(bp);
2f7a3122 2942 bnx2x_free_mem(bp);
ad5afc89 2943 }
9f6c9258 2944 bp->state = BNX2X_STATE_CLOSED;
55c11941 2945 bp->cnic_loaded = false;
9f6c9258 2946
c9ee9206
VZ
2947 /* Check if there are pending parity attentions. If there are - set
2948 * RECOVERY_IN_PROGRESS.
2949 */
ad5afc89 2950 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2951 bnx2x_set_reset_in_progress(bp);
2952
2953 /* Set RESET_IS_GLOBAL if needed */
2954 if (global)
2955 bnx2x_set_reset_global(bp);
2956 }
2957
2958
9f6c9258
DK
2959 /* The last driver must disable a "close the gate" if there is no
2960 * parity attention or "process kill" pending.
2961 */
ad5afc89
AE
2962 if (IS_PF(bp) &&
2963 !bnx2x_clear_pf_load(bp) &&
2964 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2965 bnx2x_disable_close_the_gate(bp);
2966
55c11941
MS
2967 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2968
9f6c9258
DK
2969 return 0;
2970}
f85582f8 2971
9f6c9258
DK
2972int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2973{
2974 u16 pmcsr;
2975
adf5f6a1
DK
2976 /* If there is no power capability, silently succeed */
2977 if (!bp->pm_cap) {
51c1a580 2978 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2979 return 0;
2980 }
2981
9f6c9258
DK
2982 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2983
2984 switch (state) {
2985 case PCI_D0:
2986 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2987 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2988 PCI_PM_CTRL_PME_STATUS));
2989
2990 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2991 /* delay required during transition out of D3hot */
2992 msleep(20);
2993 break;
2994
2995 case PCI_D3hot:
2996 /* If there are other clients above, don't
2997 shut down the power */
2998 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2999 return 0;
3000 /* Don't shut down the power for emulation and FPGA */
3001 if (CHIP_REV_IS_SLOW(bp))
3002 return 0;
3003
3004 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3005 pmcsr |= 3;
3006
3007 if (bp->wol)
3008 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3009
3010 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3011 pmcsr);
3012
3013 /* No more memory access after this point until
3014 * device is brought back to D0.
3015 */
3016 break;
3017
3018 default:
51c1a580 3019 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3020 return -EINVAL;
3021 }
3022 return 0;
3023}
3024
9f6c9258
DK
3025/*
3026 * net_device service functions
3027 */
d6214d7a 3028int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3029{
3030 int work_done = 0;
6383c0b3 3031 u8 cos;
9f6c9258
DK
3032 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3033 napi);
3034 struct bnx2x *bp = fp->bp;
3035
3036 while (1) {
3037#ifdef BNX2X_STOP_ON_ERROR
3038 if (unlikely(bp->panic)) {
3039 napi_complete(napi);
3040 return 0;
3041 }
3042#endif
3043
6383c0b3 3044 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3045 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3046 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3047
9f6c9258
DK
3048 if (bnx2x_has_rx_work(fp)) {
3049 work_done += bnx2x_rx_int(fp, budget - work_done);
3050
3051 /* must not complete if we consumed full budget */
3052 if (work_done >= budget)
3053 break;
3054 }
3055
3056 /* Fall out from the NAPI loop if needed */
3057 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3058
ec6ba945
VZ
3059 /* No need to update SB for FCoE L2 ring as long as
3060 * it's connected to the default SB and the SB
3061 * has been updated when NAPI was scheduled.
3062 */
3063 if (IS_FCOE_FP(fp)) {
3064 napi_complete(napi);
3065 break;
3066 }
9f6c9258 3067 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3068 /* bnx2x_has_rx_work() reads the status block,
3069 * thus we need to ensure that status block indices
3070 * have been actually read (bnx2x_update_fpsb_idx)
3071 * prior to this check (bnx2x_has_rx_work) so that
3072 * we won't write the "newer" value of the status block
3073 * to IGU (if there was a DMA right after
3074 * bnx2x_has_rx_work and if there is no rmb, the memory
3075 * reading (bnx2x_update_fpsb_idx) may be postponed
3076 * to right before bnx2x_ack_sb). In this case there
3077 * will never be another interrupt until there is
3078 * another update of the status block, while there
3079 * is still unhandled work.
3080 */
9f6c9258
DK
3081 rmb();
3082
3083 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3084 napi_complete(napi);
3085 /* Re-enable interrupts */
51c1a580 3086 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3087 "Update index to %d\n", fp->fp_hc_idx);
3088 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3089 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3090 IGU_INT_ENABLE, 1);
3091 break;
3092 }
3093 }
3094 }
3095
3096 return work_done;
3097}
3098
9f6c9258
DK
3099/* we split the first BD into headers and data BDs
3100 * to ease the pain of our fellow microcode engineers;
3101 * we use one mapping for both BDs
9f6c9258 3102 */
91226790
DK
3103static u16 bnx2x_tx_split(struct bnx2x *bp,
3104 struct bnx2x_fp_txdata *txdata,
3105 struct sw_tx_bd *tx_buf,
3106 struct eth_tx_start_bd **tx_bd, u16 hlen,
3107 u16 bd_prod)
9f6c9258
DK
3108{
3109 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3110 struct eth_tx_bd *d_tx_bd;
3111 dma_addr_t mapping;
3112 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3113
3114 /* first fix first BD */
9f6c9258
DK
3115 h_tx_bd->nbytes = cpu_to_le16(hlen);
3116
91226790
DK
3117 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3118 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3119
3120 /* now get a new data BD
3121 * (after the pbd) and fill it */
3122 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3123 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3124
3125 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3126 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3127
3128 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3129 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3130 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3131
3132 /* this marks the BD as one that has no individual mapping */
3133 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3134
3135 DP(NETIF_MSG_TX_QUEUED,
3136 "TSO split data size is %d (%x:%x)\n",
3137 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3138
3139 /* update tx_bd */
3140 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3141
3142 return bd_prod;
3143}
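/* Split example with hypothetical sizes: for a start BD of
 * old_len = 1514 bytes and hlen = 66 bytes of headers, the start BD is
 * trimmed to 66 bytes and the new data BD covers the remaining
 * 1448 bytes at mapping + 66, reusing the original DMA mapping (hence
 * the BNX2X_TSO_SPLIT_BD flag).
 */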
3144
86564c3f
YM
3145#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3146#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3147static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3148{
86564c3f
YM
3149 __sum16 tsum = (__force __sum16) csum;
3150
9f6c9258 3151 if (fix > 0)
86564c3f
YM
3152 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3153 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3154
3155 else if (fix < 0)
86564c3f
YM
3156 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3157 csum_partial(t_header, -fix, 0)));
9f6c9258 3158
e2593fcd 3159 return bswab16(tsum);
9f6c9258
DK
3160}
3161
91226790 3162static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3163{
3164 u32 rc;
a848ade4
DK
3165 __u8 prot = 0;
3166 __be16 protocol;
9f6c9258
DK
3167
3168 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3169 return XMIT_PLAIN;
9f6c9258 3170
a848ade4
DK
3171 protocol = vlan_get_protocol(skb);
3172 if (protocol == htons(ETH_P_IPV6)) {
3173 rc = XMIT_CSUM_V6;
3174 prot = ipv6_hdr(skb)->nexthdr;
3175 } else {
3176 rc = XMIT_CSUM_V4;
3177 prot = ip_hdr(skb)->protocol;
3178 }
9f6c9258 3179
a848ade4
DK
3180 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3181 if (inner_ip_hdr(skb)->version == 6) {
3182 rc |= XMIT_CSUM_ENC_V6;
3183 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3184 rc |= XMIT_CSUM_TCP;
9f6c9258 3185 } else {
a848ade4
DK
3186 rc |= XMIT_CSUM_ENC_V4;
3187 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3188 rc |= XMIT_CSUM_TCP;
3189 }
3190 }
a848ade4
DK
3191 if (prot == IPPROTO_TCP)
3192 rc |= XMIT_CSUM_TCP;
9f6c9258 3193
a848ade4
DK
3194 if (skb_is_gso_v6(skb)) {
3195 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3196 if (rc & XMIT_CSUM_ENC)
3197 rc |= XMIT_GSO_ENC_V6;
3198 } else if (skb_is_gso(skb)) {
3199 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3200 if (rc & XMIT_CSUM_ENC)
3201 rc |= XMIT_GSO_ENC_V4;
3202 }
9f6c9258
DK
3203
3204 return rc;
3205}
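/* Example classification (assuming no encapsulation): a TCPv4 TSO skb
 * with CHECKSUM_PARTIAL yields XMIT_CSUM_V4 | XMIT_CSUM_TCP |
 * XMIT_GSO_V4, while a plain TCPv6 packet with CHECKSUM_PARTIAL and no
 * GSO yields XMIT_CSUM_V6 | XMIT_CSUM_TCP.
 */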
3206
3207#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3208/* check if packet requires linearization (packet is too fragmented);
3209 no need to check fragmentation if page size > 8K (there will be no
3210 violation of FW restrictions) */
3211static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3212 u32 xmit_type)
3213{
3214 int to_copy = 0;
3215 int hlen = 0;
3216 int first_bd_sz = 0;
3217
3218 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3219 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3220
3221 if (xmit_type & XMIT_GSO) {
3222 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3223 /* Check if LSO packet needs to be copied:
3224 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3225 int wnd_size = MAX_FETCH_BD - 3;
3226 /* Number of windows to check */
3227 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3228 int wnd_idx = 0;
3229 int frag_idx = 0;
3230 u32 wnd_sum = 0;
3231
3232 /* Headers length */
3233 hlen = (int)(skb_transport_header(skb) - skb->data) +
3234 tcp_hdrlen(skb);
3235
3236 /* Amount of data (w/o headers) on linear part of SKB*/
3237 first_bd_sz = skb_headlen(skb) - hlen;
3238
3239 wnd_sum = first_bd_sz;
3240
3241 /* Calculate the first sum - it's special */
3242 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3243 wnd_sum +=
9e903e08 3244 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3245
3246 /* If there was data on linear skb data - check it */
3247 if (first_bd_sz > 0) {
3248 if (unlikely(wnd_sum < lso_mss)) {
3249 to_copy = 1;
3250 goto exit_lbl;
3251 }
3252
3253 wnd_sum -= first_bd_sz;
3254 }
3255
3256 /* Others are easier: run through the frag list and
3257 check all windows */
3258 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3259 wnd_sum +=
9e903e08 3260 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3261
3262 if (unlikely(wnd_sum < lso_mss)) {
3263 to_copy = 1;
3264 break;
3265 }
3266 wnd_sum -=
9e903e08 3267 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3268 }
3269 } else {
3270 /* in non-LSO too fragmented packet should always
3271 be linearized */
3272 to_copy = 1;
3273 }
3274 }
3275
3276exit_lbl:
3277 if (unlikely(to_copy))
3278 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3279 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3280 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3281 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3282
3283 return to_copy;
3284}
3285#endif
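/* Window check sketch with a hypothetical limit: if MAX_FETCH_BD were
 * 13, wnd_size would be 10, and every sliding window of 10 consecutive
 * frags (the linear data counts toward the first window) must carry at
 * least gso_size bytes; a window that falls short makes the transmit
 * path linearize the skb before sending.
 */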
3286
91226790
DK
3287static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3288 u32 xmit_type)
f2e0899f 3289{
a848ade4
DK
3290 struct ipv6hdr *ipv6;
3291
2297a2da
VZ
3292 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3293 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3294 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3295
3296 if (xmit_type & XMIT_GSO_ENC_V6)
3297 ipv6 = inner_ipv6_hdr(skb);
3298 else if (xmit_type & XMIT_GSO_V6)
3299 ipv6 = ipv6_hdr(skb);
3300 else
3301 ipv6 = NULL;
3302
3303 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3304 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3305}
3306
3307/**
e8920674 3308 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3309 *
e8920674
DK
3310 * @skb: packet skb
3311 * @pbd: parse BD
3312 * @xmit_type: xmit flags
f2e0899f 3313 */
91226790
DK
3314static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3315 struct eth_tx_parse_bd_e1x *pbd,
3316 u32 xmit_type)
f2e0899f
DK
3317{
3318 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3319 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3320 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3321
3322 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3323 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3324 pbd->tcp_pseudo_csum =
86564c3f
YM
3325 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3326 ip_hdr(skb)->daddr,
3327 0, IPPROTO_TCP, 0));
f2e0899f
DK
3328
3329 } else
3330 pbd->tcp_pseudo_csum =
86564c3f
YM
3331 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3332 &ipv6_hdr(skb)->daddr,
3333 0, IPPROTO_TCP, 0));
f2e0899f 3334
86564c3f
YM
3335 pbd->global_data |=
3336 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3337}
f85582f8 3338
a848ade4
DK
3339/**
3340 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3341 *
3342 * @bp: driver handle
3343 * @skb: packet skb
3344 * @parsing_data: data to be updated
3345 * @xmit_type: xmit flags
3346 *
3347 * 57712/578xx related, when skb has encapsulation
3348 */
3349static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3350 u32 *parsing_data, u32 xmit_type)
3351{
3352 *parsing_data |=
3353 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3354 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3355 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3356
3357 if (xmit_type & XMIT_CSUM_TCP) {
3358 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3359 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3360 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3361
3362 return skb_inner_transport_header(skb) +
3363 inner_tcp_hdrlen(skb) - skb->data;
3364 }
3365
3366 /* We support checksum offload for TCP and UDP only.
3367 * No need to pass the UDP header length - it's a constant.
3368 */
3369 return skb_inner_transport_header(skb) +
3370 sizeof(struct udphdr) - skb->data;
3371}
3372
f2e0899f 3373/**
e8920674 3374 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3375 *
e8920674
DK
3376 * @bp: driver handle
3377 * @skb: packet skb
3378 * @parsing_data: data to be updated
3379 * @xmit_type: xmit flags
f2e0899f 3380 *
91226790 3381 * 57712/578xx related
f2e0899f 3382 */
91226790
DK
3383static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3384 u32 *parsing_data, u32 xmit_type)
f2e0899f 3385{
e39aece7 3386 *parsing_data |=
2de67439 3387 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3388 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3389 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3390
e39aece7
VZ
3391 if (xmit_type & XMIT_CSUM_TCP) {
3392 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3393 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3394 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3395
e39aece7 3396 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3397 }
3398 /* We support checksum offload for TCP and UDP only.
3399 * No need to pass the UDP header length - it's a constant.
3400 */
3401 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3402}
3403
a848ade4 3404/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3405static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3406 struct eth_tx_start_bd *tx_start_bd,
3407 u32 xmit_type)
93ef5c02 3408{
93ef5c02
DK
3409 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3410
a848ade4 3411 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3412 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3413
3414 if (!(xmit_type & XMIT_CSUM_TCP))
3415 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3416}
3417
f2e0899f 3418/**
e8920674 3419 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3420 *
e8920674
DK
3421 * @bp: driver handle
3422 * @skb: packet skb
3423 * @pbd: parse BD to be updated
3424 * @xmit_type: xmit flags
f2e0899f 3425 */
91226790
DK
3426static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3427 struct eth_tx_parse_bd_e1x *pbd,
3428 u32 xmit_type)
f2e0899f 3429{
e39aece7 3430 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3431
3432 /* for now NS flag is not used in Linux */
3433 pbd->global_data =
86564c3f
YM
3434 cpu_to_le16(hlen |
3435 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3436 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3437
3438 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3439 skb_network_header(skb)) >> 1;
f2e0899f 3440
e39aece7
VZ
3441 hlen += pbd->ip_hlen_w;
3442
3443 /* We support checksum offload for TCP and UDP only */
3444 if (xmit_type & XMIT_CSUM_TCP)
3445 hlen += tcp_hdrlen(skb) / 2;
3446 else
3447 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3448
3449 pbd->total_hlen_w = cpu_to_le16(hlen);
3450 hlen = hlen*2;
3451
3452 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3453 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3454
3455 } else {
3456 s8 fix = SKB_CS_OFF(skb); /* signed! */
3457
3458 DP(NETIF_MSG_TX_QUEUED,
3459 "hlen %d fix %d csum before fix %x\n",
3460 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3461
3462 /* HW bug: fixup the CSUM */
3463 pbd->tcp_pseudo_csum =
3464 bnx2x_csum_fix(skb_transport_header(skb),
3465 SKB_CS(skb), fix);
3466
3467 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3468 pbd->tcp_pseudo_csum);
3469 }
3470
3471 return hlen;
3472}
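
/* Worked example (illustrative): for an untagged IPv4/TCP frame with no
 * options, hlen starts at 14 >> 1 = 7 words (MAC header), ip_hlen_w adds
 * 20 >> 1 = 10 words and the TCP header adds 20 / 2 = 10 more, so
 * total_hlen_w = 27 words and the function returns 27 * 2 = 54 bytes.
 */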
f85582f8 3473
a848ade4
DK
3474static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3475 struct eth_tx_parse_bd_e2 *pbd_e2,
3476 struct eth_tx_parse_2nd_bd *pbd2,
3477 u16 *global_data,
3478 u32 xmit_type)
3479{
e287a75c 3480 u16 hlen_w = 0;
a848ade4 3481 u8 outerip_off, outerip_len = 0;
e287a75c
DK
3482 /* from outer IP to transport */
3483 hlen_w = (skb_inner_transport_header(skb) -
3484 skb_network_header(skb)) >> 1;
a848ade4
DK
3485
3486 /* transport len */
3487 if (xmit_type & XMIT_CSUM_TCP)
e287a75c 3488 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3489 else
e287a75c 3490 hlen_w += sizeof(struct udphdr) >> 1;
a848ade4 3491
e287a75c 3492 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4
DK
3493
3494 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3495 struct iphdr *iph = ip_hdr(skb);
a848ade4
DK
3496 pbd2->fw_ip_csum_wo_len_flags_frag =
3497 bswab16(csum_fold((~iph->check) -
3498 iph->tot_len - iph->frag_off));
3499 } else {
3500 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3501 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3502 }
3503
3504 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3505
3506 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3507
3508 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3509 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3510
3511 pbd_e2->data.tunnel_data.pseudo_csum =
3512 bswab16(~csum_tcpudp_magic(
3513 inner_ip_hdr(skb)->saddr,
3514 inner_ip_hdr(skb)->daddr,
3515 0, IPPROTO_TCP, 0));
3516
3517 outerip_len = ip_hdr(skb)->ihl << 1;
3518 } else {
3519 pbd_e2->data.tunnel_data.pseudo_csum =
3520 bswab16(~csum_ipv6_magic(
3521 &inner_ipv6_hdr(skb)->saddr,
3522 &inner_ipv6_hdr(skb)->daddr,
3523 0, IPPROTO_TCP, 0));
3524 }
3525
3526 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3527
3528 *global_data |=
3529 outerip_off |
3530 (!!(xmit_type & XMIT_CSUM_V6) <<
3531 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3532 (outerip_len <<
3533 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3534 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3535 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3536
3537 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3538 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3539 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3540 }
a848ade4
DK
3541}
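
/* Worked example (illustrative): for an IPv4/TCP frame carried in a basic
 * 4-byte GRE tunnel (untagged, no options anywhere), the words computed
 * above come out as
 *
 *	fw_ip_hdr_to_payload_w: (20 + 4 + 20 + 20) >> 1 = 32 words
 *	outerip_off:            14 >> 1                 = 7 words
 *	outerip_len:            5 (ihl) << 1            = 10 words
 *
 * all of which the FW expects in 16-bit words, not bytes.
 */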
3542
9f6c9258
DK
3543/* called with netif_tx_lock
3544 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3545 * netif_wake_queue()
3546 */
3547netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3548{
3549 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3550
9f6c9258 3551 struct netdev_queue *txq;
6383c0b3 3552 struct bnx2x_fp_txdata *txdata;
9f6c9258 3553 struct sw_tx_bd *tx_buf;
619c5cb6 3554 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3555 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3556 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3557 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3558 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3559 u32 pbd_e2_parsing_data = 0;
9f6c9258 3560 u16 pkt_prod, bd_prod;
65565884 3561 int nbd, txq_index;
9f6c9258
DK
3562 dma_addr_t mapping;
3563 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3564 int i;
3565 u8 hlen = 0;
3566 __le16 pkt_size = 0;
3567 struct ethhdr *eth;
3568 u8 mac_type = UNICAST_ADDRESS;
3569
3570#ifdef BNX2X_STOP_ON_ERROR
3571 if (unlikely(bp->panic))
3572 return NETDEV_TX_BUSY;
3573#endif
3574
6383c0b3
AE
3575 txq_index = skb_get_queue_mapping(skb);
3576 txq = netdev_get_tx_queue(dev, txq_index);
3577
55c11941 3578 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3579
65565884 3580 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3581
3582 /* enable this debug print to view the transmission queue being used
51c1a580 3583 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3584 txq_index, fp_index, txdata_index); */
9f6c9258 3585
6383c0b3 3586 /* enable this debug print to view the transmission details
51c1a580
MS
3587 DP(NETIF_MSG_TX_QUEUED,
3588 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3589 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3590
6383c0b3 3591 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3592 skb_shinfo(skb)->nr_frags +
3593 BDS_PER_TX_PKT +
3594 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3595 /* Handle special storage cases separately */
c96bdc0c
DK
3596 if (txdata->tx_ring_size == 0) {
3597 struct bnx2x_eth_q_stats *q_stats =
3598 bnx2x_fp_qstats(bp, txdata->parent_fp);
3599 q_stats->driver_filtered_tx_pkt++;
3600 dev_kfree_skb(skb);
3601 return NETDEV_TX_OK;
3602 }
2de67439
YM
3603 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3604 netif_tx_stop_queue(txq);
c96bdc0c 3605 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3606
9f6c9258
DK
3607 return NETDEV_TX_BUSY;
3608 }
3609
51c1a580 3610 DP(NETIF_MSG_TX_QUEUED,
04c46736 3611 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3612 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3613 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3614 skb->len);
9f6c9258
DK
3615
3616 eth = (struct ethhdr *)skb->data;
3617
3618 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3619 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3620 if (is_broadcast_ether_addr(eth->h_dest))
3621 mac_type = BROADCAST_ADDRESS;
3622 else
3623 mac_type = MULTICAST_ADDRESS;
3624 }
3625
91226790 3626#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3627 /* First, check if we need to linearize the skb (due to FW
3628 restrictions). No need to check fragmentation if page size > 8K
3629 (there will be no violation to FW restrictions) */
3630 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3631 /* Statistics of linearization */
3632 bp->lin_cnt++;
3633 if (skb_linearize(skb) != 0) {
51c1a580
MS
3634 DP(NETIF_MSG_TX_QUEUED,
3635 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3636 dev_kfree_skb_any(skb);
3637 return NETDEV_TX_OK;
3638 }
3639 }
3640#endif
619c5cb6
VZ
3641 /* Map skb linear data for DMA */
3642 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3643 skb_headlen(skb), DMA_TO_DEVICE);
3644 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3645 DP(NETIF_MSG_TX_QUEUED,
3646 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3647 dev_kfree_skb_any(skb);
3648 return NETDEV_TX_OK;
3649 }
9f6c9258
DK
3650 /*
3651 Please read carefully. First we use one BD which we mark as start,
3652 then we have a parsing info BD (used for TSO or xsum),
3653 and only then we have the rest of the TSO BDs.
3654 (don't forget to mark the last one as last,
3655 and to unmap only AFTER you write to the BD ...)
3656 And above all, all PBD sizes are in words - NOT DWORDS!
3657 */
3658
619c5cb6
VZ
3659 /* get current pkt produced now - advance it just before sending packet
3660 * since mapping of pages may fail and cause packet to be dropped
3661 */
6383c0b3
AE
3662 pkt_prod = txdata->tx_pkt_prod;
3663 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3664
619c5cb6
VZ
3665 /* get a tx_buf and first BD
3666 * tx_start_bd may be changed during SPLIT,
3667 * but first_bd will always stay first
3668 */
6383c0b3
AE
3669 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3670 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3671 first_bd = tx_start_bd;
9f6c9258
DK
3672
3673 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3674
91226790
DK
3675 /* header nbd: indirectly zero other flags! */
3676 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3677
3678 /* remember the first BD of the packet */
6383c0b3 3679 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3680 tx_buf->skb = skb;
3681 tx_buf->flags = 0;
3682
3683 DP(NETIF_MSG_TX_QUEUED,
3684 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3685 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3686
eab6d18d 3687 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3688 tx_start_bd->vlan_or_ethertype =
3689 cpu_to_le16(vlan_tx_tag_get(skb));
3690 tx_start_bd->bd_flags.as_bitfield |=
3691 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3692 } else {
3693 /* when transmitting in a vf, start bd must hold the ethertype
3694 * for fw to enforce it
3695 */
91226790 3696 if (IS_VF(bp))
dc1ba591
AE
3697 tx_start_bd->vlan_or_ethertype =
3698 cpu_to_le16(ntohs(eth->h_proto));
91226790 3699 else
dc1ba591
AE
3700 /* used by FW for packet accounting */
3701 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3702 }
9f6c9258 3703
91226790
DK
3704 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3705
9f6c9258
DK
3706 /* turn on parsing and get a BD */
3707 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3708
93ef5c02
DK
3709 if (xmit_type & XMIT_CSUM)
3710 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3711
619c5cb6 3712 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3713 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3714 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3715
3716 if (xmit_type & XMIT_CSUM_ENC) {
3717 u16 global_data = 0;
3718
3719 /* Set PBD in enc checksum offload case */
3720 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3721 &pbd_e2_parsing_data,
3722 xmit_type);
3723
3724 /* turn on 2nd parsing and get a BD */
3725 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3726
3727 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3728
3729 memset(pbd2, 0, sizeof(*pbd2));
3730
3731 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3732 (skb_inner_network_header(skb) -
3733 skb->data) >> 1;
3734
3735 if (xmit_type & XMIT_GSO_ENC)
3736 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3737 &global_data,
3738 xmit_type);
3739
3740 pbd2->global_data = cpu_to_le16(global_data);
3741
3742 /* add an additional parsing BD indication to the start BD */
3743 SET_FLAG(tx_start_bd->general_data,
3744 ETH_TX_START_BD_PARSE_NBDS, 1);
3745 /* set encapsulation flag in start BD */
3746 SET_FLAG(tx_start_bd->general_data,
3747 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3748 nbd++;
3749 } else if (xmit_type & XMIT_CSUM) {
91226790 3750 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3751 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3752 &pbd_e2_parsing_data,
3753 xmit_type);
a848ade4 3754 }
dc1ba591 3755
91226790
DK
3756 /* Add the MACs to the parsing BD; this is a VF */
3757 if (IS_VF(bp)) {
3758 /* override GRE parameters in BD */
3759 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3760 &pbd_e2->data.mac_addr.src_mid,
3761 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3762 eth->h_source);
91226790
DK
3763
3764 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3765 &pbd_e2->data.mac_addr.dst_mid,
3766 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3767 eth->h_dest);
3768 }
96bed4b9
YM
3769
3770 SET_FLAG(pbd_e2_parsing_data,
3771 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3772 } else {
96bed4b9 3773 u16 global_data = 0;
6383c0b3 3774 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3775 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3776 /* Set PBD in checksum offload case */
3777 if (xmit_type & XMIT_CSUM)
3778 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3779
96bed4b9
YM
3780 SET_FLAG(global_data,
3781 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3782 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3783 }
3784
f85582f8 3785 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3786 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3787 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3788 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3789 pkt_size = tx_start_bd->nbytes;
3790
51c1a580 3791 DP(NETIF_MSG_TX_QUEUED,
91226790 3792 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3793 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3794 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3795 tx_start_bd->bd_flags.as_bitfield,
3796 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3797
3798 if (xmit_type & XMIT_GSO) {
3799
3800 DP(NETIF_MSG_TX_QUEUED,
3801 "TSO packet len %d hlen %d total len %d tso size %d\n",
3802 skb->len, hlen, skb_headlen(skb),
3803 skb_shinfo(skb)->gso_size);
3804
3805 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3806
91226790
DK
3807 if (unlikely(skb_headlen(skb) > hlen)) {
3808 nbd++;
6383c0b3
AE
3809 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3810 &tx_start_bd, hlen,
91226790
DK
3811 bd_prod);
3812 }
619c5cb6 3813 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3814 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3815 xmit_type);
f2e0899f
DK
3816 else
3817 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3818 }
2297a2da
VZ
3819
3820 /* Set the PBD's parsing_data field if not zero
3821 * (for the chips newer than 57711).
3822 */
3823 if (pbd_e2_parsing_data)
3824 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3825
9f6c9258
DK
3826 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3827
f85582f8 3828 /* Handle fragmented skb */
9f6c9258
DK
3829 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3830 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3831
9e903e08
ED
3832 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3833 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3834 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3835 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3836
51c1a580
MS
3837 DP(NETIF_MSG_TX_QUEUED,
3838 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3839
3840 /* we need to unmap all buffers already mapped
3841 * for this SKB;
3842 * first_bd->nbd needs to be properly updated
3843 * before the call to bnx2x_free_tx_pkt
3844 */
3845 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3846 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3847 TX_BD(txdata->tx_pkt_prod),
3848 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3849 return NETDEV_TX_OK;
3850 }
3851
9f6c9258 3852 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3853 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3854 if (total_pkt_bd == NULL)
6383c0b3 3855 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3856
9f6c9258
DK
3857 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3858 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3859 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3860 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3861 nbd++;
9f6c9258
DK
3862
3863 DP(NETIF_MSG_TX_QUEUED,
3864 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3865 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3866 le16_to_cpu(tx_data_bd->nbytes));
3867 }
3868
3869 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3870
619c5cb6
VZ
3871 /* update with actual num BDs */
3872 first_bd->nbd = cpu_to_le16(nbd);
3873
9f6c9258
DK
3874 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3875
3876 /* now send a tx doorbell, counting the next BD
3877 * if the packet contains or ends with it
3878 */
3879 if (TX_BD_POFF(bd_prod) < nbd)
3880 nbd++;
3881
619c5cb6
VZ
3882 /* total_pkt_bytes should be set on the first data BD if
3883 * it's not an LSO packet and there is more than one
3884 * data BD. In this case pkt_size is limited by an MTU value.
3885 * However we prefer to set it for an LSO packet (while we don't
3886 * have to) in order to save some CPU cycles in the non-LSO
3887 * case, where we care much more about them.
3888 */
9f6c9258
DK
3889 if (total_pkt_bd != NULL)
3890 total_pkt_bd->total_pkt_bytes = pkt_size;
3891
523224a3 3892 if (pbd_e1x)
9f6c9258 3893 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3894 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3895 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3896 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3897 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3898 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3899 if (pbd_e2)
3900 DP(NETIF_MSG_TX_QUEUED,
3901 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3902 pbd_e2,
3903 pbd_e2->data.mac_addr.dst_hi,
3904 pbd_e2->data.mac_addr.dst_mid,
3905 pbd_e2->data.mac_addr.dst_lo,
3906 pbd_e2->data.mac_addr.src_hi,
3907 pbd_e2->data.mac_addr.src_mid,
3908 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3909 pbd_e2->parsing_data);
9f6c9258
DK
3910 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3911
2df1a70a
TH
3912 netdev_tx_sent_queue(txq, skb->len);
3913
8373c57d
WB
3914 skb_tx_timestamp(skb);
3915
6383c0b3 3916 txdata->tx_pkt_prod++;
9f6c9258
DK
3917 /*
3918 * Make sure that the BD data is updated before updating the producer
3919 * since FW might read the BD right after the producer is updated.
3920 * This is only applicable for weak-ordered memory model archs such
3921 * as IA-64. The following barrier is also mandatory since the FW
3922 * assumes packets must have BDs.
3923 */
3924 wmb();
3925
6383c0b3 3926 txdata->tx_db.data.prod += nbd;
9f6c9258 3927 barrier();
f85582f8 3928
6383c0b3 3929 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3930
3931 mmiowb();
3932
6383c0b3 3933 txdata->tx_bd_prod += nbd;
9f6c9258 3934
7df2dc6b 3935 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3936 netif_tx_stop_queue(txq);
3937
3938 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3939 * ordering of set_bit() in netif_tx_stop_queue() and read of
3940 * fp->bd_tx_cons */
3941 smp_mb();
3942
15192a8c 3943 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3944 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3945 netif_tx_wake_queue(txq);
3946 }
6383c0b3 3947 txdata->tx_pkt++;
9f6c9258
DK
3948
3949 return NETDEV_TX_OK;
3950}
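
/* BD accounting sketch (illustrative): a checksummed, non-GSO packet with two
 * page fragments uses one start BD, one parsing BD and two data BDs, so nbd
 * starts at 2, is incremented once per mapped fragment (to 4 here), and may
 * be incremented once more before ringing the doorbell when the packet wraps
 * onto the next BD ring page (TX_BD_POFF(bd_prod) < nbd).
 */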
f85582f8 3951
6383c0b3
AE
3952/**
3953 * bnx2x_setup_tc - routine to configure net_device for multi tc
3954 *
3955 * @dev: net device to configure
3956 * @num_tc: number of traffic classes to enable
3957 *
3958 * callback connected to the ndo_setup_tc function pointer
3959 */
3960int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3961{
3962 int cos, prio, count, offset;
3963 struct bnx2x *bp = netdev_priv(dev);
3964
3965 /* setup tc must be called under rtnl lock */
3966 ASSERT_RTNL();
3967
3968 /* no traffic classes requested. aborting */
3969 if (!num_tc) {
3970 netdev_reset_tc(dev);
3971 return 0;
3972 }
3973
3974 /* requested to support too many traffic classes */
3975 if (num_tc > bp->max_cos) {
51c1a580
MS
3976 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3977 num_tc, bp->max_cos);
6383c0b3
AE
3978 return -EINVAL;
3979 }
3980
3981 /* declare the number of supported traffic classes */
3982 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3983 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3984 return -EINVAL;
3985 }
3986
3987 /* configure priority to traffic class mapping */
3988 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3989 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3990 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3991 "mapping priority %d to tc %d\n",
6383c0b3
AE
3992 prio, bp->prio_to_cos[prio]);
3993 }
3994
3995
3996 /* Use this configuration to differentiate tc0 from other COSes.
3997 This can be used for ETS or PFC, and saves the effort of setting
3998 up a multi-class queueing discipline or negotiating DCBX with a switch
3999 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4000 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4001 for (prio = 1; prio < 16; prio++) {
4002 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4003 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4004 } */
4005
4006 /* configure traffic class to transmission queue mapping */
4007 for (cos = 0; cos < bp->max_cos; cos++) {
4008 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4009 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4010 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4011 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4012 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4013 cos, offset, count);
4014 }
4015
4016 return 0;
4017}
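
/* Mapping example (illustrative): with 4 ethernet RSS queues, no CNIC queues
 * and max_cos = 3, the loop above yields tc0 -> txqs 0..3, tc1 -> txqs 4..7
 * and tc2 -> txqs 8..11, i.e. count = BNX2X_NUM_ETH_QUEUES(bp) queues per
 * traffic class at offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
 */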
4018
9f6c9258
DK
4019/* called with rtnl_lock */
4020int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4021{
4022 struct sockaddr *addr = p;
4023 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4024 int rc = 0;
9f6c9258 4025
51c1a580
MS
4026 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4027 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4028 return -EINVAL;
51c1a580 4029 }
614c76df 4030
a3348722
BW
4031 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4032 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4033 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4034 return -EINVAL;
51c1a580 4035 }
9f6c9258 4036
619c5cb6
VZ
4037 if (netif_running(dev)) {
4038 rc = bnx2x_set_eth_mac(bp, false);
4039 if (rc)
4040 return rc;
4041 }
4042
9f6c9258 4043 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4044
523224a3 4045 if (netif_running(dev))
619c5cb6 4046 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4047
619c5cb6 4048 return rc;
9f6c9258
DK
4049}
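
/* Flow sketch (illustrative): when the interface is up, the old MAC is first
 * removed via bnx2x_set_eth_mac(bp, false), dev_addr is then updated from the
 * sockaddr, and the new MAC is programmed with bnx2x_set_eth_mac(bp, true);
 * if the interface is down, only the dev_addr copy happens.
 */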
4050
b3b83c3f
DK
4051static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4052{
4053 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4054 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4055 u8 cos;
b3b83c3f
DK
4056
4057 /* Common */
55c11941 4058
b3b83c3f
DK
4059 if (IS_FCOE_IDX(fp_index)) {
4060 memset(sb, 0, sizeof(union host_hc_status_block));
4061 fp->status_blk_mapping = 0;
b3b83c3f 4062 } else {
b3b83c3f 4063 /* status blocks */
619c5cb6 4064 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4065 BNX2X_PCI_FREE(sb->e2_sb,
4066 bnx2x_fp(bp, fp_index,
4067 status_blk_mapping),
4068 sizeof(struct host_hc_status_block_e2));
4069 else
4070 BNX2X_PCI_FREE(sb->e1x_sb,
4071 bnx2x_fp(bp, fp_index,
4072 status_blk_mapping),
4073 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4074 }
55c11941 4075
b3b83c3f
DK
4076 /* Rx */
4077 if (!skip_rx_queue(bp, fp_index)) {
4078 bnx2x_free_rx_bds(fp);
4079
4080 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4081 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4082 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4083 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4084 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4085
4086 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4087 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4088 sizeof(struct eth_fast_path_rx_cqe) *
4089 NUM_RCQ_BD);
4090
4091 /* SGE ring */
4092 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4093 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4094 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4095 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4096 }
4097
4098 /* Tx */
4099 if (!skip_tx_queue(bp, fp_index)) {
4100 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4101 for_each_cos_in_tx_queue(fp, cos) {
65565884 4102 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4103
51c1a580 4104 DP(NETIF_MSG_IFDOWN,
94f05b0f 4105 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4106 fp_index, cos, txdata->cid);
4107
4108 BNX2X_FREE(txdata->tx_buf_ring);
4109 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4110 txdata->tx_desc_mapping,
4111 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4112 }
b3b83c3f
DK
4113 }
4114 /* end of fastpath */
4115}
4116
55c11941
MS
4117void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4118{
4119 int i;
4120 for_each_cnic_queue(bp, i)
4121 bnx2x_free_fp_mem_at(bp, i);
4122}
4123
b3b83c3f
DK
4124void bnx2x_free_fp_mem(struct bnx2x *bp)
4125{
4126 int i;
55c11941 4127 for_each_eth_queue(bp, i)
b3b83c3f
DK
4128 bnx2x_free_fp_mem_at(bp, i);
4129}
4130
1191cb83 4131static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4132{
4133 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4134 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4135 bnx2x_fp(bp, index, sb_index_values) =
4136 (__le16 *)status_blk.e2_sb->sb.index_values;
4137 bnx2x_fp(bp, index, sb_running_index) =
4138 (__le16 *)status_blk.e2_sb->sb.running_index;
4139 } else {
4140 bnx2x_fp(bp, index, sb_index_values) =
4141 (__le16 *)status_blk.e1x_sb->sb.index_values;
4142 bnx2x_fp(bp, index, sb_running_index) =
4143 (__le16 *)status_blk.e1x_sb->sb.running_index;
4144 }
4145}
4146
1191cb83
ED
4147/* Returns the number of actually allocated BDs */
4148static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4149 int rx_ring_size)
4150{
4151 struct bnx2x *bp = fp->bp;
4152 u16 ring_prod, cqe_ring_prod;
4153 int i, failure_cnt = 0;
4154
4155 fp->rx_comp_cons = 0;
4156 cqe_ring_prod = ring_prod = 0;
4157
4158 /* This routine is called only during fp init so
4159 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4160 */
4161 for (i = 0; i < rx_ring_size; i++) {
4162 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4163 failure_cnt++;
4164 continue;
4165 }
4166 ring_prod = NEXT_RX_IDX(ring_prod);
4167 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4168 WARN_ON(ring_prod <= (i - failure_cnt));
4169 }
4170
4171 if (failure_cnt)
4172 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4173 i - failure_cnt, fp->index);
4174
4175 fp->rx_bd_prod = ring_prod;
4176 /* Limit the CQE producer by the CQE ring size */
4177 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4178 cqe_ring_prod);
4179 fp->rx_pkt = fp->rx_calls = 0;
4180
15192a8c 4181 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4182
4183 return i - failure_cnt;
4184}
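
/* Example (illustrative): if rx_ring_size is, say, 4000 and 5 buffer
 * allocations fail, the loop still walks all 4000 slots, reports "was only
 * able to allocate 3995 rx skbs", adds 5 to rx_skb_alloc_failed and returns
 * 3995; the caller then checks the result against the MIN_RX_SIZE_* limits.
 */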
4185
4186static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4187{
4188 int i;
4189
4190 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4191 struct eth_rx_cqe_next_page *nextpg;
4192
4193 nextpg = (struct eth_rx_cqe_next_page *)
4194 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4195 nextpg->addr_hi =
4196 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4197 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4198 nextpg->addr_lo =
4199 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4200 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4201 }
4202}
4203
b3b83c3f
DK
4204static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4205{
4206 union host_hc_status_block *sb;
4207 struct bnx2x_fastpath *fp = &bp->fp[index];
4208 int ring_size = 0;
6383c0b3 4209 u8 cos;
c2188952 4210 int rx_ring_size = 0;
b3b83c3f 4211
a3348722
BW
4212 if (!bp->rx_ring_size &&
4213 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4214 rx_ring_size = MIN_RX_SIZE_NONTPA;
4215 bp->rx_ring_size = rx_ring_size;
55c11941 4216 } else if (!bp->rx_ring_size) {
c2188952
VZ
4217 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4218
065f8b92
YM
4219 if (CHIP_IS_E3(bp)) {
4220 u32 cfg = SHMEM_RD(bp,
4221 dev_info.port_hw_config[BP_PORT(bp)].
4222 default_cfg);
4223
4224 /* Decrease ring size for 1G functions */
4225 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4226 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4227 rx_ring_size /= 10;
4228 }
d760fc37 4229
c2188952
VZ
4230 /* allocate at least number of buffers required by FW */
4231 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4232 MIN_RX_SIZE_TPA, rx_ring_size);
4233
4234 bp->rx_ring_size = rx_ring_size;
614c76df 4235 } else /* if rx_ring_size specified - use it */
c2188952 4236 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4237
04c46736
YM
4238 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4239
b3b83c3f
DK
4240 /* Common */
4241 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4242
b3b83c3f 4243 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4244 /* status blocks */
619c5cb6 4245 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4246 BNX2X_PCI_ALLOC(sb->e2_sb,
4247 &bnx2x_fp(bp, index, status_blk_mapping),
4248 sizeof(struct host_hc_status_block_e2));
4249 else
4250 BNX2X_PCI_ALLOC(sb->e1x_sb,
4251 &bnx2x_fp(bp, index, status_blk_mapping),
4252 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4253 }
8eef2af1
DK
4254
4255 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4256 * set shortcuts for it.
4257 */
4258 if (!IS_FCOE_IDX(index))
4259 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4260
4261 /* Tx */
4262 if (!skip_tx_queue(bp, index)) {
4263 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4264 for_each_cos_in_tx_queue(fp, cos) {
65565884 4265 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4266
51c1a580
MS
4267 DP(NETIF_MSG_IFUP,
4268 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4269 index, cos);
4270
4271 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4272 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4273 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4274 &txdata->tx_desc_mapping,
b3b83c3f 4275 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4276 }
b3b83c3f
DK
4277 }
4278
4279 /* Rx */
4280 if (!skip_rx_queue(bp, index)) {
4281 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4282 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4283 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4284 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4285 &bnx2x_fp(bp, index, rx_desc_mapping),
4286 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4287
4288 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4289 &bnx2x_fp(bp, index, rx_comp_mapping),
4290 sizeof(struct eth_fast_path_rx_cqe) *
4291 NUM_RCQ_BD);
4292
4293 /* SGE ring */
4294 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4295 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4296 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4297 &bnx2x_fp(bp, index, rx_sge_mapping),
4298 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4299 /* RX BD ring */
4300 bnx2x_set_next_page_rx_bd(fp);
4301
4302 /* CQ ring */
4303 bnx2x_set_next_page_rx_cq(fp);
4304
4305 /* BDs */
4306 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4307 if (ring_size < rx_ring_size)
4308 goto alloc_mem_err;
4309 }
4310
4311 return 0;
4312
4313/* handles low memory cases */
4314alloc_mem_err:
4315 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4316 index, ring_size);
4317 /* FW will drop all packets if queue is not big enough,
4318 * In these cases we disable the queue
6383c0b3 4319 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4320 */
4321 if (ring_size < (fp->disable_tpa ?
eb722d7a 4322 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4323 /* release memory allocated for this queue */
4324 bnx2x_free_fp_mem_at(bp, index);
4325 return -ENOMEM;
4326 }
4327 return 0;
4328}
4329
55c11941
MS
4330int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4331{
4332 if (!NO_FCOE(bp))
4333 /* FCoE */
4334 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4335 /* we will fail the load process instead of marking
4336 * NO_FCOE_FLAG
4337 */
4338 return -ENOMEM;
4339
4340 return 0;
4341}
4342
b3b83c3f
DK
4343int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4344{
4345 int i;
4346
55c11941
MS
4347 /* 1. Allocate FP for leading - fatal if error
4348 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4349 */
4350
4351 /* leading */
4352 if (bnx2x_alloc_fp_mem_at(bp, 0))
4353 return -ENOMEM;
6383c0b3 4354
b3b83c3f
DK
4355 /* RSS */
4356 for_each_nondefault_eth_queue(bp, i)
4357 if (bnx2x_alloc_fp_mem_at(bp, i))
4358 break;
4359
4360 /* handle memory failures */
4361 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4362 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4363
4364 WARN_ON(delta < 0);
4864a16a 4365 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4366 if (CNIC_SUPPORT(bp))
4367 /* move non eth FPs next to last eth FP
4368 * must be done in that order
4369 * FCOE_IDX < FWD_IDX < OOO_IDX
4370 */
b3b83c3f 4371
55c11941
MS
4372 /* move FCoE fp even if NO_FCOE_FLAG is on */
4373 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4374 bp->num_ethernet_queues -= delta;
4375 bp->num_queues = bp->num_ethernet_queues +
4376 bp->num_cnic_queues;
b3b83c3f
DK
4377 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4378 bp->num_queues + delta, bp->num_queues);
4379 }
4380
4381 return 0;
4382}
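
/* Failure-handling sketch (illustrative): if, say, queues 0..5 allocate but
 * queue 6 of 8 fails, delta = 2, the two trailing eth fastpaths are shrunk
 * away and, when CNIC is supported, the FCoE fastpath is moved down by delta
 * so it stays adjacent to the last remaining eth queue.
 */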
d6214d7a 4383
523224a3
DK
4384void bnx2x_free_mem_bp(struct bnx2x *bp)
4385{
c3146eb6
DK
4386 int i;
4387
4388 for (i = 0; i < bp->fp_array_size; i++)
4389 kfree(bp->fp[i].tpa_info);
523224a3 4390 kfree(bp->fp);
15192a8c
BW
4391 kfree(bp->sp_objs);
4392 kfree(bp->fp_stats);
65565884 4393 kfree(bp->bnx2x_txq);
523224a3
DK
4394 kfree(bp->msix_table);
4395 kfree(bp->ilt);
4396}
4397
0329aba1 4398int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4399{
4400 struct bnx2x_fastpath *fp;
4401 struct msix_entry *tbl;
4402 struct bnx2x_ilt *ilt;
6383c0b3 4403 int msix_table_size = 0;
55c11941 4404 int fp_array_size, txq_array_size;
15192a8c 4405 int i;
6383c0b3
AE
4406
4407 /*
4408 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4409 * path IGU SBs plus default SB (for PF only).
6383c0b3 4410 */
1ab4434c
AE
4411 msix_table_size = bp->igu_sb_cnt;
4412 if (IS_PF(bp))
4413 msix_table_size++;
4414 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4415
6383c0b3 4416 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4417 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4418 bp->fp_array_size = fp_array_size;
4419 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4420
c3146eb6 4421 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4422 if (!fp)
4423 goto alloc_err;
c3146eb6 4424 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4425 fp[i].tpa_info =
4426 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4427 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4428 if (!(fp[i].tpa_info))
4429 goto alloc_err;
4430 }
4431
523224a3
DK
4432 bp->fp = fp;
4433
15192a8c 4434 /* allocate sp objs */
c3146eb6 4435 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4436 GFP_KERNEL);
4437 if (!bp->sp_objs)
4438 goto alloc_err;
4439
4440 /* allocate fp_stats */
c3146eb6 4441 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4442 GFP_KERNEL);
4443 if (!bp->fp_stats)
4444 goto alloc_err;
4445
65565884 4446 /* Allocate memory for the transmission queues array */
55c11941
MS
4447 txq_array_size =
4448 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4449 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4450
4451 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4452 GFP_KERNEL);
65565884
MS
4453 if (!bp->bnx2x_txq)
4454 goto alloc_err;
4455
523224a3 4456 /* msix table */
01e23742 4457 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4458 if (!tbl)
4459 goto alloc_err;
4460 bp->msix_table = tbl;
4461
4462 /* ilt */
4463 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4464 if (!ilt)
4465 goto alloc_err;
4466 bp->ilt = ilt;
4467
4468 return 0;
4469alloc_err:
4470 bnx2x_free_mem_bp(bp);
4471 return -ENOMEM;
4472
4473}
4474
a9fccec7 4475int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4476{
4477 struct bnx2x *bp = netdev_priv(dev);
4478
4479 if (unlikely(!netif_running(dev)))
4480 return 0;
4481
5d07d868 4482 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4483 return bnx2x_nic_load(bp, LOAD_NORMAL);
4484}
4485
1ac9e428
YR
4486int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4487{
4488 u32 sel_phy_idx = 0;
4489 if (bp->link_params.num_phys <= 1)
4490 return INT_PHY;
4491
4492 if (bp->link_vars.link_up) {
4493 sel_phy_idx = EXT_PHY1;
4494 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4495 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4496 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4497 sel_phy_idx = EXT_PHY2;
4498 } else {
4499
4500 switch (bnx2x_phy_selection(&bp->link_params)) {
4501 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4502 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4503 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4504 sel_phy_idx = EXT_PHY1;
4505 break;
4506 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4507 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4508 sel_phy_idx = EXT_PHY2;
4509 break;
4510 }
4511 }
4512
4513 return sel_phy_idx;
4514
4515}
4516int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4517{
4518 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4519 /*
2de67439 4520 * The selected active PHY index is always the post-swap one (in case PHY
1ac9e428
YR
4521 * swapping is enabled). So when swapping is enabled, we need to reverse
4522 * the configuration
4523 */
4524
4525 if (bp->link_params.multi_phy_config &
4526 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4527 if (sel_phy_idx == EXT_PHY1)
4528 sel_phy_idx = EXT_PHY2;
4529 else if (sel_phy_idx == EXT_PHY2)
4530 sel_phy_idx = EXT_PHY1;
4531 }
4532 return LINK_CONFIG_IDX(sel_phy_idx);
4533}
4534
55c11941 4535#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4536int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4537{
4538 struct bnx2x *bp = netdev_priv(dev);
4539 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4540
4541 switch (type) {
4542 case NETDEV_FCOE_WWNN:
4543 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4544 cp->fcoe_wwn_node_name_lo);
4545 break;
4546 case NETDEV_FCOE_WWPN:
4547 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4548 cp->fcoe_wwn_port_name_lo);
4549 break;
4550 default:
51c1a580 4551 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4552 return -EINVAL;
4553 }
4554
4555 return 0;
4556}
4557#endif
4558
9f6c9258
DK
4559/* called with rtnl_lock */
4560int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4561{
4562 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4563
4564 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4565 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4566 return -EAGAIN;
4567 }
4568
4569 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4570 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4571 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4572 return -EINVAL;
51c1a580 4573 }
9f6c9258
DK
4574
4575 /* This does not race with packet allocation
4576 * because the actual alloc size is
4577 * only updated as part of load
4578 */
4579 dev->mtu = new_mtu;
4580
66371c44
MM
4581 return bnx2x_reload_if_running(dev);
4582}
4583
c8f44aff 4584netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4585 netdev_features_t features)
66371c44
MM
4586{
4587 struct bnx2x *bp = netdev_priv(dev);
4588
4589 /* TPA requires Rx CSUM offloading */
621b4d66 4590 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4591 features &= ~NETIF_F_LRO;
621b4d66
DK
4592 features &= ~NETIF_F_GRO;
4593 }
66371c44
MM
4594
4595 return features;
4596}
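
/* Behaviour sketch (illustrative): if userspace requests NETIF_F_LRO while
 * clearing NETIF_F_RXCSUM (or while TPA is disabled via bp->disable_tpa),
 * the fixup above strips both LRO and GRO from the requested feature set
 * before bnx2x_set_features() ever sees it.
 */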
4597
c8f44aff 4598int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4599{
4600 struct bnx2x *bp = netdev_priv(dev);
4601 u32 flags = bp->flags;
8802f579 4602 u32 changes;
538dd2e3 4603 bool bnx2x_reload = false;
66371c44
MM
4604
4605 if (features & NETIF_F_LRO)
4606 flags |= TPA_ENABLE_FLAG;
4607 else
4608 flags &= ~TPA_ENABLE_FLAG;
4609
621b4d66
DK
4610 if (features & NETIF_F_GRO)
4611 flags |= GRO_ENABLE_FLAG;
4612 else
4613 flags &= ~GRO_ENABLE_FLAG;
4614
538dd2e3
MB
4615 if (features & NETIF_F_LOOPBACK) {
4616 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4617 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4618 bnx2x_reload = true;
4619 }
4620 } else {
4621 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4622 bp->link_params.loopback_mode = LOOPBACK_NONE;
4623 bnx2x_reload = true;
4624 }
4625 }
4626
8802f579
ED
4627 changes = flags ^ bp->flags;
4628
4629 /* if GRO is changed while LRO is enabled, don't force a reload */
4630 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4631 changes &= ~GRO_ENABLE_FLAG;
4632
4633 if (changes)
538dd2e3 4634 bnx2x_reload = true;
8802f579
ED
4635
4636 bp->flags = flags;
66371c44 4637
538dd2e3 4638 if (bnx2x_reload) {
66371c44
MM
4639 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4640 return bnx2x_reload_if_running(dev);
4641 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4642 }
4643
66371c44 4644 return 0;
9f6c9258
DK
4645}
4646
4647void bnx2x_tx_timeout(struct net_device *dev)
4648{
4649 struct bnx2x *bp = netdev_priv(dev);
4650
4651#ifdef BNX2X_STOP_ON_ERROR
4652 if (!bp->panic)
4653 bnx2x_panic();
4654#endif
7be08a72
AE
4655
4656 smp_mb__before_clear_bit();
4657 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4658 smp_mb__after_clear_bit();
4659
9f6c9258 4660 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4661 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4662}
4663
9f6c9258
DK
4664int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4665{
4666 struct net_device *dev = pci_get_drvdata(pdev);
4667 struct bnx2x *bp;
4668
4669 if (!dev) {
4670 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4671 return -ENODEV;
4672 }
4673 bp = netdev_priv(dev);
4674
4675 rtnl_lock();
4676
4677 pci_save_state(pdev);
4678
4679 if (!netif_running(dev)) {
4680 rtnl_unlock();
4681 return 0;
4682 }
4683
4684 netif_device_detach(dev);
4685
5d07d868 4686 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4687
4688 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4689
4690 rtnl_unlock();
4691
4692 return 0;
4693}
4694
4695int bnx2x_resume(struct pci_dev *pdev)
4696{
4697 struct net_device *dev = pci_get_drvdata(pdev);
4698 struct bnx2x *bp;
4699 int rc;
4700
4701 if (!dev) {
4702 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4703 return -ENODEV;
4704 }
4705 bp = netdev_priv(dev);
4706
4707 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4708 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4709 return -EAGAIN;
4710 }
4711
4712 rtnl_lock();
4713
4714 pci_restore_state(pdev);
4715
4716 if (!netif_running(dev)) {
4717 rtnl_unlock();
4718 return 0;
4719 }
4720
4721 bnx2x_set_power_state(bp, PCI_D0);
4722 netif_device_attach(dev);
4723
4724 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4725
4726 rtnl_unlock();
4727
4728 return rc;
4729}
619c5cb6
VZ
4730
4731
4732void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4733 u32 cid)
4734{
4735 /* ustorm cxt validation */
4736 cxt->ustorm_ag_context.cdu_usage =
4737 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4738 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4739 /* xcontext validation */
4740 cxt->xstorm_ag_context.cdu_reserved =
4741 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4742 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4743}
4744
1191cb83
ED
4745static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4746 u8 fw_sb_id, u8 sb_index,
4747 u8 ticks)
619c5cb6
VZ
4748{
4749
4750 u32 addr = BAR_CSTRORM_INTMEM +
4751 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4752 REG_WR8(bp, addr, ticks);
51c1a580
MS
4753 DP(NETIF_MSG_IFUP,
4754 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4755 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4756}
4757
1191cb83
ED
4758static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4759 u16 fw_sb_id, u8 sb_index,
4760 u8 disable)
619c5cb6
VZ
4761{
4762 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4763 u32 addr = BAR_CSTRORM_INTMEM +
4764 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4765 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4766 /* clear and set */
4767 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4768 flags |= enable_flag;
0c14e5ce 4769 REG_WR8(bp, addr, flags);
51c1a580
MS
4770 DP(NETIF_MSG_IFUP,
4771 "port %x fw_sb_id %d sb_index %d disable %d\n",
4772 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4773}
4774
4775void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4776 u8 sb_index, u8 disable, u16 usec)
4777{
4778 int port = BP_PORT(bp);
4779 u8 ticks = usec / BNX2X_BTR;
4780
4781 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4782
4783 disable = disable ? 1 : (usec ? 0 : 1);
4784 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4785}
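
/* Example (illustrative, assuming a BNX2X_BTR resolution of 4 usec): a
 * request of usec = 48 programs 48 / 4 = 12 ticks into the status block
 * timeout, while usec = 0 forces disable to 1, so a zero interval always
 * turns host coalescing off for that SB index.
 */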