drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  [linux-2.6-block.git]
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/ip6_checksum.h>
27#include <linux/prefetch.h>
28#include "bnx2x_cmn.h"
29#include "bnx2x_init.h"
30#include "bnx2x_sp.h"
31
32/**
33 * bnx2x_move_fp - move content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @from: source FP index
37 * @to: destination FP index
38 *
39 * Makes sure the contents of the bp->fp[to].napi are kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem-copying the entire
42 * source onto the target. Update txdata pointers and related
43 * content.
44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
55
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
58
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
62
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86}
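/* Worked example (illustrative values only, not from the driver): assume
 * BNX2X_NUM_ETH_QUEUES(bp) == 6, bp->max_cos == 3, and the FCoE fastpath
 * is moved from index 8 to index 6 (delta == 2):
 *   old_max_eth_txqs = 6 * 3 = 18
 *   new_max_eth_txqs = (6 - 8 + 6) * 3 = 12
 * so its txdata slot moves down by max_cos * delta = 6 entries, from
 * 18 + FCOE_TXQ_IDX_OFFSET to 12 + FCOE_TXQ_IDX_OFFSET.
 */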
87
88/**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97{
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113 }
114}
115
116/**
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118 *
119 * @bp: driver handle
120 * @delta: number of eth queues which were not allocated
121 */
122static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123{
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overridden
128 */
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
133
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137 }
138 }
139}
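/* Worked example (illustrative values only): assume the old number of eth
 * queues was 8 and delta == 2, so 6 queues remain.  For cos == 1 and
 * queue i == 0, new_idx = 1 * (8 - 2) + 0 = 6, i.e. the second-CoS ring
 * of queue 0 is packed right after the six first-CoS rings.
 */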
140
141int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143/* free skb in the packet ring at pos idx
144 * return idx of last bd freed
145 */
146static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
149{
150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155 int nbd;
156
157 /* prefetch skb end pointer to speedup dev_kfree_skb() */
158 prefetch(&skb->end);
159
160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
161 txdata->txq_index, idx, tx_buf, skb);
162
163 /* unmap first bd */
164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
167
168
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
173 bnx2x_panic();
174 }
175#endif
176 new_cons = nbd + tx_buf->first_bd;
177
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181 /* Skip a parse bd... */
182 --nbd;
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 --nbd;
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 }
190
191 /* now free frags */
192 while (nbd > 0) {
193
194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 if (--nbd)
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199 }
200
201 /* release skb */
202 WARN_ON(!skb);
203 if (likely(skb)) {
204 (*pkts_compl)++;
205 (*bytes_compl) += skb->len;
206 }
207
208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
210 tx_buf->skb = NULL;
211
212 return new_cons;
213}
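/* Worked example (illustrative): suppose the start BD reports nbd == 5.
 * After the initial decrement nbd == 4 and new_cons = first_bd + 4; the
 * parse BD is skipped (it carries no DMA mapping), and if
 * BNX2X_TSO_SPLIT_BD is set the split-header BD is skipped as well,
 * leaving the remaining BDs as data frags unmapped via dma_unmap_page().
 */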
214
215int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
216{
217 struct netdev_queue *txq;
218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219 unsigned int pkts_compl = 0, bytes_compl = 0;
220
221#ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
223 return -1;
224#endif
225
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
229
230 while (sw_cons != hw_cons) {
231 u16 pkt_cons;
232
233 pkt_cons = TX_BD(sw_cons);
234
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
238
239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240 &pkts_compl, &bytes_compl);
241
242 sw_cons++;
243 }
244
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
249
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
254 * forever.
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
258 */
259 smp_mb();
260
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent re-enabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 *
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
269 * stops the queue
270 */
271
272 __netif_tx_lock(txq, smp_processor_id());
273
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
278
279 __netif_tx_unlock(txq);
280 }
281 return 0;
282}
283
284static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285 u16 idx)
286{
287 u16 last_max = fp->last_max_sge;
288
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
291}
292
293static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 u16 sge_len,
295 struct eth_end_agg_rx_cqe *cqe)
296{
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
299 u16 delta = 0;
300 u16 i;
301
302 if (!sge_len)
303 return;
304
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
309
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
312
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
315 bnx2x_update_last_max_sge(fp,
316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
317
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
321
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
324 last_elem++;
325
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
329 break;
330
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
333 }
334
335 if (delta > 0) {
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
339 }
340
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
344}
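/* Worked example (illustrative): each fp->sge_mask[] element covers
 * BIT_VEC64_ELEM_SZ (64) SGEs, and a cleared bit means "page consumed".
 * If first_elem == 2 and last_elem == 4, the loop examines elements 2
 * and 3: an element whose bits are all cleared is re-armed to
 * BIT_VEC64_ELEM_ONE_MASK and adds 64 to delta (and thus to
 * rx_sge_prod); the first element that still has set bits stops the walk.
 */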
345
346/* Get Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
348 */
349static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
351 bool *l4_rxhash)
352{
353 /* Get Toeplitz hash from CQE */
354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
357
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
361 return le32_to_cpu(cqe->rss_hash_result);
362 }
363 *l4_rxhash = false;
364 return 0;
365}
366
367static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
368 u16 cons, u16 prod,
369 struct eth_fast_path_rx_cqe *cqe)
370{
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 dma_addr_t mapping;
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 378
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
383 /* Try to map an empty data buffer from the aggregation info */
384 mapping = dma_map_single(&bp->pdev->dev,
385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
387 /*
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
391 */
392
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
e52fcb24 395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397 return;
398 }
9f6c9258 399
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
619c5cb6 402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
409
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
420 tpa_info->full_page =
421 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
422 tpa_info->gro_size = gro_size;
423 }
424
425#ifdef BNX2X_STOP_ON_ERROR
426 fp->tpa_queue_used |= (1 << queue);
427#ifdef _ASM_GENERIC_INT_L64_H
428 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
429#else
430 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
431#endif
432 fp->tpa_queue_used);
433#endif
434}
435
436/* Timestamp option length allowed for TPA aggregation:
437 *
438 * nop nop kind length echo val
439 */
440#define TPA_TSTAMP_OPT_LEN 12
441/**
442 * bnx2x_set_gro_params - compute GRO values
443 *
444 * @skb: packet skb
445 * @parsing_flags: parsing flags from the START CQE
446 * @len_on_bd: total length of the first packet for the
447 * aggregation.
448 * @pkt_len: length of all segments
449 *
450 * Approximate the MSS for this aggregation using its
451 * first packet.
452 * Compute the number of aggregated segments and the gso_type.
453 */
454static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
455 u16 len_on_bd, unsigned int pkt_len)
456{
cbf1de72 457 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 458 * other than timestamp or IPv6 extension headers.
e4e3c02a 459 */
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463 PRS_FLAG_OVERETH_IPV6) {
464 hdrs_len += sizeof(struct ipv6hdr);
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 } else {
467 hdrs_len += sizeof(struct iphdr);
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469 }
470
471 /* Check if there was a TCP timestamp; if there is, it will
472 * always be 12 bytes long: nop nop kind length echo val.
473 *
474 * Otherwise FW would close the aggregation.
475 */
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
483 */
484 NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
485 skb_shinfo(skb)->gso_size);
486}
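/* Worked example (illustrative values, IPv4 without TCP timestamps):
 * hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr) (20) +
 *            sizeof(struct iphdr) (20) = 54.
 * With len_on_bd == 1514 and pkt_len == 9000:
 *   gso_size = 1514 - 54 = 1460
 *   gso_segs = DIV_ROUND_UP(9000 - 54, 1460) = 7
 */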
487
488static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
489 struct bnx2x_fastpath *fp, u16 index)
490{
491 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
492 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
493 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
494 dma_addr_t mapping;
495
496 if (unlikely(page == NULL)) {
497 BNX2X_ERR("Can't alloc sge\n");
498 return -ENOMEM;
499 }
500
501 mapping = dma_map_page(&bp->pdev->dev, page, 0,
502 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
503 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
504 __free_pages(page, PAGES_PER_SGE_SHIFT);
505 BNX2X_ERR("Can't map sge\n");
506 return -ENOMEM;
507 }
508
509 sw_buf->page = page;
510 dma_unmap_addr_set(sw_buf, mapping, mapping);
511
512 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
513 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
514
515 return 0;
516}
517
518static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
519 struct bnx2x_agg_info *tpa_info,
520 u16 pages,
521 struct sk_buff *skb,
522 struct eth_end_agg_rx_cqe *cqe,
523 u16 cqe_idx)
524{
525 struct sw_rx_page *rx_pg, old_rx_pg;
526 u32 i, frag_len, frag_size;
527 int err, j, frag_id = 0;
619c5cb6 528 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 529 u16 full_page = 0, gro_size = 0;
9f6c9258 530
619c5cb6 531 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
532
533 if (fp->mode == TPA_MODE_GRO) {
534 gro_size = tpa_info->gro_size;
535 full_page = tpa_info->full_page;
536 }
537
538 /* This is needed in order to enable forwarding support */
539 if (frag_size)
540 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
541 le16_to_cpu(cqe->pkt_len));
542
543#ifdef BNX2X_STOP_ON_ERROR
544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 pages, cqe_idx);
619c5cb6 547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
548 bnx2x_panic();
549 return -EINVAL;
550 }
551#endif
552
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
556
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
561 else /* LRO */
562 frag_len = min_t(u32, frag_size,
563 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
564
565 rx_pg = &fp->rx_page_ring[sge_idx];
566 old_rx_pg = *rx_pg;
567
568 /* If we fail to allocate a substitute page, we simply stop
569 where we are and drop the whole packet */
570 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
571 if (unlikely(err)) {
15192a8c 572 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
573 return err;
574 }
575
576 /* Unmap the page as we are going to pass it to the stack */
577 dma_unmap_page(&bp->pdev->dev,
578 dma_unmap_addr(&old_rx_pg, mapping),
579 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
9f6c9258 580 /* Add one frag and update the appropriate fields in the skb */
581 if (fp->mode == TPA_MODE_LRO)
582 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
583 else { /* GRO */
584 int rem;
585 int offset = 0;
586 for (rem = frag_len; rem > 0; rem -= gro_size) {
587 int len = rem > gro_size ? gro_size : rem;
588 skb_fill_page_desc(skb, frag_id++,
589 old_rx_pg.page, offset, len);
590 if (offset)
591 get_page(old_rx_pg.page);
592 offset += len;
593 }
594 }
595
596 skb->data_len += frag_len;
e1ac50f6 597 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
598 skb->len += frag_len;
599
600 frag_size -= frag_len;
601 }
602
603 return 0;
604}
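/* Worked example (illustrative): in GRO mode with gro_size == 1460, a
 * frag_len of 4380 is split into three skb frags of 1460 bytes each,
 * all pointing into the same SGE page at offsets 0, 1460 and 2920; the
 * page's refcount is bumped once per extra frag via get_page().
 */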
605
606static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
607{
608 if (fp->rx_frag_size)
609 put_page(virt_to_head_page(data));
610 else
611 kfree(data);
612}
613
614static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
615{
616 if (fp->rx_frag_size)
617 return netdev_alloc_frag(fp->rx_frag_size);
618
619 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
620}
621
622#ifdef CONFIG_INET
623static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
624{
625 const struct iphdr *iph = ip_hdr(skb);
626 struct tcphdr *th;
627
628 skb_set_transport_header(skb, sizeof(struct iphdr));
629 th = tcp_hdr(skb);
630
631 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
632 iph->saddr, iph->daddr, 0);
633}
634
635static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
636{
637 struct ipv6hdr *iph = ipv6_hdr(skb);
638 struct tcphdr *th;
639
640 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
641 th = tcp_hdr(skb);
642
643 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
644 &iph->saddr, &iph->daddr, 0);
645}
646#endif
647
648static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
649 struct sk_buff *skb)
650{
651#ifdef CONFIG_INET
652 if (skb_shinfo(skb)->gso_size) {
653 skb_set_network_header(skb, 0);
654 switch (be16_to_cpu(skb->protocol)) {
655 case ETH_P_IP:
656 bnx2x_gro_ip_csum(bp, skb);
657 break;
658 case ETH_P_IPV6:
659 bnx2x_gro_ipv6_csum(bp, skb);
660 break;
661 default:
662 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
663 be16_to_cpu(skb->protocol));
664 }
665 tcp_gro_complete(skb);
666 }
667#endif
668 napi_gro_receive(&fp->napi, skb);
669}
670
671static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
672 struct bnx2x_agg_info *tpa_info,
673 u16 pages,
674 struct eth_end_agg_rx_cqe *cqe,
675 u16 cqe_idx)
676{
677 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
678 u8 pad = tpa_info->placement_offset;
679 u16 len = tpa_info->len_on_bd;
680 struct sk_buff *skb = NULL;
681 u8 *new_data, *data = rx_buf->data;
682 u8 old_tpa_state = tpa_info->tpa_state;
683
684 tpa_info->tpa_state = BNX2X_TPA_STOP;
685
686 /* If there was an error during the handling of the TPA_START -
687 * drop this aggregation.
688 */
689 if (old_tpa_state == BNX2X_TPA_ERROR)
690 goto drop;
691
e52fcb24 692 /* Try to allocate the new data */
d46d132c 693 new_data = bnx2x_frag_alloc(fp);
694 /* Unmap skb in the pool anyway, as we are going to change
695 pool entry status to BNX2X_TPA_STOP even if new skb allocation
696 fails. */
697 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 698 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 699 if (likely(new_data))
d46d132c 700 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 701
e52fcb24 702 if (likely(skb)) {
703#ifdef BNX2X_STOP_ON_ERROR
704 if (pad + len > fp->rx_buf_size) {
705 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
706 pad, len, fp->rx_buf_size);
707 bnx2x_panic();
708 return;
709 }
710#endif
711
e52fcb24 712 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 713 skb_put(skb, len);
e52fcb24 714 skb->rxhash = tpa_info->rxhash;
715 skb->l4_rxhash = tpa_info->l4_rxhash;
716
717 skb->protocol = eth_type_trans(skb, bp->dev);
718 skb->ip_summed = CHECKSUM_UNNECESSARY;
719
720 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
721 skb, cqe, cqe_idx)) {
722 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
723 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9969085e 724 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 725 } else {
726 DP(NETIF_MSG_RX_STATUS,
727 "Failed to allocate new pages - dropping packet!\n");
40955532 728 dev_kfree_skb_any(skb);
729 }
730
731
732 /* put new data in bin */
733 rx_buf->data = new_data;
9f6c9258 734
619c5cb6 735 return;
9f6c9258 736 }
d46d132c 737 bnx2x_frag_free(fp, new_data);
738drop:
739 /* drop the packet and keep the buffer in the bin */
740 DP(NETIF_MSG_RX_STATUS,
741 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 742 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
743}
744
745static int bnx2x_alloc_rx_data(struct bnx2x *bp,
746 struct bnx2x_fastpath *fp, u16 index)
747{
748 u8 *data;
749 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
750 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
751 dma_addr_t mapping;
752
753 data = bnx2x_frag_alloc(fp);
754 if (unlikely(data == NULL))
755 return -ENOMEM;
756
757 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
758 fp->rx_buf_size,
759 DMA_FROM_DEVICE);
760 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
761 bnx2x_frag_free(fp, data);
762 BNX2X_ERR("Can't map rx data\n");
763 return -ENOMEM;
764 }
765
766 rx_buf->data = data;
767 dma_unmap_addr_set(rx_buf, mapping, mapping);
768
769 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
770 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
771
772 return 0;
773}
774
775static
776void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
777 struct bnx2x_fastpath *fp,
778 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 779{
780 /* Do nothing if no L4 csum validation was done.
781 * We do not check whether IP csum was validated. For IPv4 we assume
782 * that if the card got as far as validating the L4 csum, it also
783 * validated the IP csum. IPv6 has no IP csum.
784 */
785 if (cqe->fast_path_cqe.status_flags &
786 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
787 return;
788
789 /* If L4 validation was done, check if an error was found. */
790
791 if (cqe->fast_path_cqe.type_error_flags &
792 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
793 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
794 qstats->hw_csum_err++;
795 else
796 skb->ip_summed = CHECKSUM_UNNECESSARY;
797}
798
799int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
800{
801 struct bnx2x *bp = fp->bp;
802 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
803 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
804 int rx_pkt = 0;
805
806#ifdef BNX2X_STOP_ON_ERROR
807 if (unlikely(bp->panic))
808 return 0;
809#endif
810
811 /* CQ "next element" is of the size of the regular element,
812 that's why it's ok here */
813 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
814 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
815 hw_comp_cons++;
816
817 bd_cons = fp->rx_bd_cons;
818 bd_prod = fp->rx_bd_prod;
819 bd_prod_fw = bd_prod;
820 sw_comp_cons = fp->rx_comp_cons;
821 sw_comp_prod = fp->rx_comp_prod;
822
823 /* Memory barrier necessary as speculative reads of the rx
824 * buffer can be ahead of the index in the status block
825 */
826 rmb();
827
828 DP(NETIF_MSG_RX_STATUS,
829 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
830 fp->index, hw_comp_cons, sw_comp_cons);
831
832 while (sw_comp_cons != hw_comp_cons) {
833 struct sw_rx_bd *rx_buf = NULL;
834 struct sk_buff *skb;
835 union eth_rx_cqe *cqe;
619c5cb6 836 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 837 u8 cqe_fp_flags;
619c5cb6 838 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 839 u16 len, pad, queue;
e52fcb24 840 u8 *data;
a334b5fb 841 bool l4_rxhash;
9f6c9258 842
843#ifdef BNX2X_STOP_ON_ERROR
844 if (unlikely(bp->panic))
845 return 0;
846#endif
847
848 comp_ring_cons = RCQ_BD(sw_comp_cons);
849 bd_prod = RX_BD(bd_prod);
850 bd_cons = RX_BD(bd_cons);
851
9f6c9258 852 cqe = &fp->rx_comp_ring[comp_ring_cons];
853 cqe_fp = &cqe->fast_path_cqe;
854 cqe_fp_flags = cqe_fp->type_error_flags;
855 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 856
857 DP(NETIF_MSG_RX_STATUS,
858 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
859 CQE_TYPE(cqe_fp_flags),
860 cqe_fp_flags, cqe_fp->status_flags,
861 le32_to_cpu(cqe_fp->rss_hash_result),
862 le16_to_cpu(cqe_fp->vlan_tag),
863 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
864
865 /* is this a slowpath msg? */
619c5cb6 866 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
867 bnx2x_sp_event(fp, cqe);
868 goto next_cqe;
e52fcb24 869 }
621b4d66 870
871 rx_buf = &fp->rx_buf_ring[bd_cons];
872 data = rx_buf->data;
9f6c9258 873
e52fcb24 874 if (!CQE_TYPE_FAST(cqe_fp_type)) {
875 struct bnx2x_agg_info *tpa_info;
876 u16 frag_size, pages;
619c5cb6 877#ifdef BNX2X_STOP_ON_ERROR
878 /* sanity check */
879 if (fp->disable_tpa &&
880 (CQE_TYPE_START(cqe_fp_type) ||
881 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 882 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 883 CQE_TYPE(cqe_fp_type));
884#endif
885
886 if (CQE_TYPE_START(cqe_fp_type)) {
887 u16 queue = cqe_fp->queue_index;
888 DP(NETIF_MSG_RX_STATUS,
889 "calling tpa_start on queue %d\n",
890 queue);
891
892 bnx2x_tpa_start(fp, queue,
893 bd_cons, bd_prod,
894 cqe_fp);
895
896 goto next_rx;
897
898 }
899 queue = cqe->end_agg_cqe.queue_index;
900 tpa_info = &fp->tpa_info[queue];
901 DP(NETIF_MSG_RX_STATUS,
902 "calling tpa_stop on queue %d\n",
903 queue);
904
905 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
906 tpa_info->len_on_bd;
907
908 if (fp->mode == TPA_MODE_GRO)
909 pages = (frag_size + tpa_info->full_page - 1) /
910 tpa_info->full_page;
911 else
912 pages = SGE_PAGE_ALIGN(frag_size) >>
913 SGE_PAGE_SHIFT;
914
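/* Worked example (illustrative): with SGE_PAGE_SIZE * PAGES_PER_SGE
 * == 4096, an LRO aggregation with frag_size == 30000 needs
 * pages = SGE_PAGE_ALIGN(30000) >> SGE_PAGE_SHIFT = 8 SGE entries.
 */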
915 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
916 &cqe->end_agg_cqe, comp_ring_cons);
917#ifdef BNX2X_STOP_ON_ERROR
918 if (bp->panic)
919 return 0;
920#endif
921
922 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
923 goto next_cqe;
924 }
925 /* non TPA */
621b4d66 926 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
927 pad = cqe_fp->placement_offset;
928 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 929 dma_unmap_addr(rx_buf, mapping),
930 pad + RX_COPY_THRESH,
931 DMA_FROM_DEVICE);
932 pad += NET_SKB_PAD;
933 prefetch(data + pad); /* speedup eth_type_trans() */
934 /* is this an error packet? */
935 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 936 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
937 "ERROR flags %x rx packet %u\n",
938 cqe_fp_flags, sw_comp_cons);
15192a8c 939 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
940 goto reuse_rx;
941 }
9f6c9258 942
943 /* Since we don't have a jumbo ring
944 * copy small packets if mtu > 1500
945 */
946 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
947 (len <= RX_COPY_THRESH)) {
948 skb = netdev_alloc_skb_ip_align(bp->dev, len);
949 if (skb == NULL) {
51c1a580 950 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 951 "ERROR packet dropped because of alloc failure\n");
15192a8c 952 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
953 goto reuse_rx;
954 }
955 memcpy(skb->data, data + pad, len);
956 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
957 } else {
958 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 959 dma_unmap_single(&bp->pdev->dev,
e52fcb24 960 dma_unmap_addr(rx_buf, mapping),
a8c94b91 961 fp->rx_buf_size,
9f6c9258 962 DMA_FROM_DEVICE);
d46d132c 963 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 964 if (unlikely(!skb)) {
d46d132c 965 bnx2x_frag_free(fp, data);
966 bnx2x_fp_qstats(bp, fp)->
967 rx_skb_alloc_failed++;
968 goto next_rx;
969 }
9f6c9258 970 skb_reserve(skb, pad);
9f6c9258 971 } else {
972 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
973 "ERROR packet dropped because of alloc failure\n");
15192a8c 974 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 975reuse_rx:
e52fcb24 976 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
977 goto next_rx;
978 }
036d2df9 979 }
9f6c9258 980
981 skb_put(skb, len);
982 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 983
984 /* Set Toeplitz hash for a non-LRO skb */
985 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
986 skb->l4_rxhash = l4_rxhash;
9f6c9258 987
036d2df9 988 skb_checksum_none_assert(skb);
f85582f8 989
d6cb3e41 990 if (bp->dev->features & NETIF_F_RXCSUM)
991 bnx2x_csum_validate(skb, cqe, fp,
992 bnx2x_fp_qstats(bp, fp));
9f6c9258 993
f233cafe 994 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 995
996 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
997 PARSING_FLAGS_VLAN)
9bcc0893 998 __vlan_hwaccel_put_tag(skb,
619c5cb6 999 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 1000 napi_gro_receive(&fp->napi, skb);
1001
1002
1003next_rx:
e52fcb24 1004 rx_buf->data = NULL;
1005
1006 bd_cons = NEXT_RX_IDX(bd_cons);
1007 bd_prod = NEXT_RX_IDX(bd_prod);
1008 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1009 rx_pkt++;
1010next_cqe:
1011 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1012 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1013
1014 if (rx_pkt == budget)
1015 break;
1016 } /* while */
1017
1018 fp->rx_bd_cons = bd_cons;
1019 fp->rx_bd_prod = bd_prod_fw;
1020 fp->rx_comp_cons = sw_comp_cons;
1021 fp->rx_comp_prod = sw_comp_prod;
1022
1023 /* Update producers */
1024 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1025 fp->rx_sge_prod);
1026
1027 fp->rx_pkt += rx_pkt;
1028 fp->rx_calls++;
1029
1030 return rx_pkt;
1031}
1032
1033static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1034{
1035 struct bnx2x_fastpath *fp = fp_cookie;
1036 struct bnx2x *bp = fp->bp;
6383c0b3 1037 u8 cos;
9f6c9258 1038
1039 DP(NETIF_MSG_INTR,
1040 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1041 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1042 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1043
1044#ifdef BNX2X_STOP_ON_ERROR
1045 if (unlikely(bp->panic))
1046 return IRQ_HANDLED;
1047#endif
1048
1049 /* Handle Rx and Tx according to MSI-X vector */
1050 prefetch(fp->rx_cons_sb);
1051
1052 for_each_cos_in_tx_queue(fp, cos)
65565884 1053 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1054
523224a3 1055 prefetch(&fp->sb_running_index[SM_RX_ID]);
1056 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1057
1058 return IRQ_HANDLED;
1059}
1060
1061/* HW Lock for shared dual port PHYs */
1062void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1063{
1064 mutex_lock(&bp->port.phy_mutex);
1065
8203c4b6 1066 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1067}
1068
1069void bnx2x_release_phy_lock(struct bnx2x *bp)
1070{
8203c4b6 1071 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1072
1073 mutex_unlock(&bp->port.phy_mutex);
1074}
1075
1076/* calculates MF speed according to current line speed and MF configuration */
1077u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1078{
1079 u16 line_speed = bp->link_vars.line_speed;
1080 if (IS_MF(bp)) {
1081 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1082 bp->mf_config[BP_VN(bp)]);
1083
1084 /* Calculate the current MAX line speed limit for the MF
1085 * devices
1086 */
1087 if (IS_MF_SI(bp))
1088 line_speed = (line_speed * maxCfg) / 100;
1089 else { /* SD mode */
1090 u16 vn_max_rate = maxCfg * 100;
1091
1092 if (vn_max_rate < line_speed)
1093 line_speed = vn_max_rate;
1094 }
1095 }
1096
1097 return line_speed;
1098}
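/* Worked example (illustrative): with a physical line_speed of
 * 10000 Mbps and maxCfg == 30:
 *   - MF SI mode: line_speed = 10000 * 30 / 100 = 3000 Mbps
 *   - MF SD mode: vn_max_rate = 30 * 100 = 3000 Mbps, which is below
 *     10000 Mbps, so the reported speed is likewise capped at 3000 Mbps.
 */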
1099
1100/**
1101 * bnx2x_fill_report_data - fill link report data to report
1102 *
1103 * @bp: driver handle
1104 * @data: link state to update
1105 *
1106 * It uses non-atomic bit operations because it is called under the mutex.
1107 */
1108static void bnx2x_fill_report_data(struct bnx2x *bp,
1109 struct bnx2x_link_report_data *data)
1110{
1111 u16 line_speed = bnx2x_get_mf_speed(bp);
1112
1113 memset(data, 0, sizeof(*data));
1114
1115 /* Fill the report data: effective line speed */
1116 data->line_speed = line_speed;
1117
1118 /* Link is down */
1119 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1120 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1121 &data->link_report_flags);
1122
1123 /* Full DUPLEX */
1124 if (bp->link_vars.duplex == DUPLEX_FULL)
1125 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1126
1127 /* Rx Flow Control is ON */
1128 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1129 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1130
1131 /* Tx Flow Control is ON */
1132 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1133 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1134}
1135
1136/**
1137 * bnx2x_link_report - report link status to OS.
1138 *
1139 * @bp: driver handle
1140 *
1141 * Calls the __bnx2x_link_report() under the same locking scheme
1142 * as a link/PHY state managing code to ensure a consistent link
1143 * reporting.
1144 */
1145
1146void bnx2x_link_report(struct bnx2x *bp)
1147{
1148 bnx2x_acquire_phy_lock(bp);
1149 __bnx2x_link_report(bp);
1150 bnx2x_release_phy_lock(bp);
1151}
1152
1153/**
1154 * __bnx2x_link_report - report link status to OS.
1155 *
1156 * @bp: driver handle
1157 *
1158 * Non-atomic implementation.
1159 * Should be called under the phy_lock.
1160 */
1161void __bnx2x_link_report(struct bnx2x *bp)
1162{
1163 struct bnx2x_link_report_data cur_data;
1164
1165 /* reread mf_cfg */
1166 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1167 bnx2x_read_mf_cfg(bp);
1168
1169 /* Read the current link report info */
1170 bnx2x_fill_report_data(bp, &cur_data);
1171
1172 /* Don't report link down or exactly the same link status twice */
1173 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1174 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1175 &bp->last_reported_link.link_report_flags) &&
1176 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1177 &cur_data.link_report_flags)))
1178 return;
1179
1180 bp->link_cnt++;
1181
1182 /* We are going to report new link parameters now -
1183 * remember the current data for the next time.
1184 */
1185 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1186
1187 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1188 &cur_data.link_report_flags)) {
1189 netif_carrier_off(bp->dev);
1190 netdev_err(bp->dev, "NIC Link is Down\n");
1191 return;
1192 } else {
1193 const char *duplex;
1194 const char *flow;
1195
1196 netif_carrier_on(bp->dev);
1197
1198 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1199 &cur_data.link_report_flags))
1200 duplex = "full";
1201 else
1202 duplex = "half";
1203
1204 /* Handle the FC at the end so that only these flags could
1205 * possibly be set. This way we may easily check if no FC is
1206 * enabled.
1207 */
1208 if (cur_data.link_report_flags) {
1209 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1210 &cur_data.link_report_flags)) {
1211 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1212 &cur_data.link_report_flags))
1213 flow = "ON - receive & transmit";
1214 else
1215 flow = "ON - receive";
9f6c9258 1216 } else {
94f05b0f 1217 flow = "ON - transmit";
9f6c9258 1218 }
1219 } else {
1220 flow = "none";
9f6c9258 1221 }
1222 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1223 cur_data.line_speed, duplex, flow);
1224 }
1225}
1226
1227static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1228{
1229 int i;
1230
1231 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1232 struct eth_rx_sge *sge;
1233
1234 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1235 sge->addr_hi =
1236 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1237 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1238
1239 sge->addr_lo =
1240 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1241 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1242 }
1243}
1244
1245static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1246 struct bnx2x_fastpath *fp, int last)
1247{
1248 int i;
1249
1250 for (i = 0; i < last; i++) {
1251 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1252 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1253 u8 *data = first_buf->data;
1254
1255 if (data == NULL) {
1256 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1257 continue;
1258 }
1259 if (tpa_info->tpa_state == BNX2X_TPA_START)
1260 dma_unmap_single(&bp->pdev->dev,
1261 dma_unmap_addr(first_buf, mapping),
1262 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1263 bnx2x_frag_free(fp, data);
1191cb83
ED
1264 first_buf->data = NULL;
1265 }
1266}
1267
55c11941
MS
1268void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1269{
1270 int j;
1271
1272 for_each_rx_queue_cnic(bp, j) {
1273 struct bnx2x_fastpath *fp = &bp->fp[j];
1274
1275 fp->rx_bd_cons = 0;
1276
1277 /* Activate BD ring */
1278 /* Warning!
1279 * this will generate an interrupt (to the TSTORM)
1280 * must only be done after chip is initialized
1281 */
1282 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1283 fp->rx_sge_prod);
1284 }
1285}
1286
9f6c9258
DK
1287void bnx2x_init_rx_rings(struct bnx2x *bp)
1288{
1289 int func = BP_FUNC(bp);
523224a3 1290 u16 ring_prod;
9f6c9258 1291 int i, j;
25141580 1292
b3b83c3f 1293 /* Allocate TPA resources */
55c11941 1294 for_each_eth_queue(bp, j) {
523224a3 1295 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1296
a8c94b91
VZ
1297 DP(NETIF_MSG_IFUP,
1298 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1299
523224a3 1300 if (!fp->disable_tpa) {
1301 /* Fill the per-aggregation pool */
dfacf138 1302 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1303 struct bnx2x_agg_info *tpa_info =
1304 &fp->tpa_info[i];
1305 struct sw_rx_bd *first_buf =
1306 &tpa_info->first_buf;
1307
d46d132c 1308 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1309 if (!first_buf->data) {
51c1a580
MS
1310 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1311 j);
9f6c9258
DK
1312 bnx2x_free_tpa_pool(bp, fp, i);
1313 fp->disable_tpa = 1;
1314 break;
1315 }
619c5cb6
VZ
1316 dma_unmap_addr_set(first_buf, mapping, 0);
1317 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1318 }
523224a3
DK
1319
1320 /* "next page" elements initialization */
1321 bnx2x_set_next_page_sgl(fp);
1322
1323 /* set SGEs bit mask */
1324 bnx2x_init_sge_ring_bit_mask(fp);
1325
1326 /* Allocate SGEs and initialize the ring elements */
1327 for (i = 0, ring_prod = 0;
1328 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1329
1330 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1331 BNX2X_ERR("was only able to allocate %d rx sges\n",
1332 i);
1333 BNX2X_ERR("disabling TPA for queue[%d]\n",
1334 j);
523224a3 1335 /* Cleanup already allocated elements */
619c5cb6
VZ
1336 bnx2x_free_rx_sge_range(bp, fp,
1337 ring_prod);
1338 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1339 MAX_AGG_QS(bp));
523224a3
DK
1340 fp->disable_tpa = 1;
1341 ring_prod = 0;
1342 break;
1343 }
1344 ring_prod = NEXT_SGE_IDX(ring_prod);
1345 }
1346
1347 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1348 }
1349 }
1350
55c11941 1351 for_each_eth_queue(bp, j) {
9f6c9258
DK
1352 struct bnx2x_fastpath *fp = &bp->fp[j];
1353
1354 fp->rx_bd_cons = 0;
9f6c9258 1355
b3b83c3f
DK
1356 /* Activate BD ring */
1357 /* Warning!
1358 * this will generate an interrupt (to the TSTORM)
1359 * must only be done after chip is initialized
1360 */
1361 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1362 fp->rx_sge_prod);
9f6c9258 1363
9f6c9258
DK
1364 if (j != 0)
1365 continue;
1366
619c5cb6 1367 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1368 REG_WR(bp, BAR_USTRORM_INTMEM +
1369 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1370 U64_LO(fp->rx_comp_mapping));
1371 REG_WR(bp, BAR_USTRORM_INTMEM +
1372 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1373 U64_HI(fp->rx_comp_mapping));
1374 }
9f6c9258
DK
1375 }
1376}
f85582f8 1377
55c11941 1378static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1379{
6383c0b3 1380 u8 cos;
55c11941 1381 struct bnx2x *bp = fp->bp;
9f6c9258 1382
55c11941
MS
1383 for_each_cos_in_tx_queue(fp, cos) {
1384 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1385 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1386
55c11941
MS
1387 u16 sw_prod = txdata->tx_pkt_prod;
1388 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1389
55c11941
MS
1390 while (sw_cons != sw_prod) {
1391 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1392 &pkts_compl, &bytes_compl);
1393 sw_cons++;
9f6c9258 1394 }
55c11941
MS
1395
1396 netdev_tx_reset_queue(
1397 netdev_get_tx_queue(bp->dev,
1398 txdata->txq_index));
1399 }
1400}
1401
1402static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1403{
1404 int i;
1405
1406 for_each_tx_queue_cnic(bp, i) {
1407 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1408 }
1409}
1410
1411static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1412{
1413 int i;
1414
1415 for_each_eth_queue(bp, i) {
1416 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1417 }
1418}
1419
b3b83c3f
DK
1420static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1421{
1422 struct bnx2x *bp = fp->bp;
1423 int i;
1424
1425 /* ring wasn't allocated */
1426 if (fp->rx_buf_ring == NULL)
1427 return;
1428
1429 for (i = 0; i < NUM_RX_BD; i++) {
1430 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1431 u8 *data = rx_buf->data;
b3b83c3f 1432
e52fcb24 1433 if (data == NULL)
b3b83c3f 1434 continue;
b3b83c3f
DK
1435 dma_unmap_single(&bp->pdev->dev,
1436 dma_unmap_addr(rx_buf, mapping),
1437 fp->rx_buf_size, DMA_FROM_DEVICE);
1438
e52fcb24 1439 rx_buf->data = NULL;
d46d132c 1440 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1441 }
1442}
1443
55c11941
MS
1444static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1445{
1446 int j;
1447
1448 for_each_rx_queue_cnic(bp, j) {
1449 bnx2x_free_rx_bds(&bp->fp[j]);
1450 }
1451}
1452
9f6c9258
DK
1453static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1454{
b3b83c3f 1455 int j;
9f6c9258 1456
55c11941 1457 for_each_eth_queue(bp, j) {
9f6c9258
DK
1458 struct bnx2x_fastpath *fp = &bp->fp[j];
1459
b3b83c3f 1460 bnx2x_free_rx_bds(fp);
9f6c9258 1461
9f6c9258 1462 if (!fp->disable_tpa)
dfacf138 1463 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1464 }
1465}
1466
55c11941
MS
1467void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1468{
1469 bnx2x_free_tx_skbs_cnic(bp);
1470 bnx2x_free_rx_skbs_cnic(bp);
1471}
1472
9f6c9258
DK
1473void bnx2x_free_skbs(struct bnx2x *bp)
1474{
1475 bnx2x_free_tx_skbs(bp);
1476 bnx2x_free_rx_skbs(bp);
1477}
1478
e3835b99
DK
1479void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1480{
1481 /* load old values */
1482 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1483
1484 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1485 /* leave all but MAX value */
1486 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1487
1488 /* set new MAX value */
1489 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1490 & FUNC_MF_CFG_MAX_BW_MASK;
1491
1492 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1493 }
1494}
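/* Worked example (illustrative): if FUNC_MF_CFG_MAX_BW_SHIFT == 16 and
 * the requested value is 100, the old MAX bits are masked out of mf_cfg
 * and replaced with (100 << 16) & FUNC_MF_CFG_MAX_BW_MASK before the new
 * configuration is handed to the MCP via DRV_MSG_CODE_SET_MF_BW.
 */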
1495
ca92429f
DK
1496/**
1497 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1498 *
1499 * @bp: driver handle
1500 * @nvecs: number of vectors to be released
1501 */
1502static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1503{
ca92429f 1504 int i, offset = 0;
9f6c9258 1505
ca92429f
DK
1506 if (nvecs == offset)
1507 return;
ad5afc89
AE
1508
1509 /* VFs don't have a default SB */
1510 if (IS_PF(bp)) {
1511 free_irq(bp->msix_table[offset].vector, bp->dev);
1512 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1513 bp->msix_table[offset].vector);
1514 offset++;
1515 }
55c11941
MS
1516
1517 if (CNIC_SUPPORT(bp)) {
1518 if (nvecs == offset)
1519 return;
1520 offset++;
1521 }
ca92429f 1522
ec6ba945 1523 for_each_eth_queue(bp, i) {
ca92429f
DK
1524 if (nvecs == offset)
1525 return;
51c1a580
MS
1526 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1527 i, bp->msix_table[offset].vector);
9f6c9258 1528
ca92429f 1529 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1530 }
1531}
1532
d6214d7a 1533void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1534{
30a5de77 1535 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1536 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1537 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1538
1539 /* vfs don't have a default status block */
1540 if (IS_PF(bp))
1541 nvecs++;
1542
1543 bnx2x_free_msix_irqs(bp, nvecs);
1544 } else {
30a5de77 1545 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1546 }
9f6c9258
DK
1547}
1548
0e8d2ec5 1549int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1550{
1ab4434c 1551 int msix_vec = 0, i, rc;
9f6c9258 1552
1ab4434c
AE
1553 /* VFs don't have a default status block */
1554 if (IS_PF(bp)) {
1555 bp->msix_table[msix_vec].entry = msix_vec;
1556 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1557 bp->msix_table[0].entry);
1558 msix_vec++;
1559 }
9f6c9258 1560
55c11941
MS
1561 /* Cnic requires an msix vector for itself */
1562 if (CNIC_SUPPORT(bp)) {
1563 bp->msix_table[msix_vec].entry = msix_vec;
1564 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1565 msix_vec, bp->msix_table[msix_vec].entry);
1566 msix_vec++;
1567 }
1568
6383c0b3 1569 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1570 for_each_eth_queue(bp, i) {
d6214d7a 1571 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1572 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1573 msix_vec, msix_vec, i);
d6214d7a 1574 msix_vec++;
9f6c9258
DK
1575 }
1576
1ab4434c
AE
1577 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1578 msix_vec);
d6214d7a 1579
1ab4434c 1580 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1581
1582 /*
1583 * reconfigure number of tx/rx queues according to available
1584 * MSI-X vectors
1585 */
55c11941 1586 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1587 /* how many fewer vectors will we have? */
1ab4434c 1588 int diff = msix_vec - rc;
9f6c9258 1589
51c1a580 1590 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1591
1592 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1593
1594 if (rc) {
30a5de77
DK
1595 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1596 goto no_msix;
9f6c9258 1597 }
d6214d7a
DK
1598 /*
1599 * decrease number of queues by number of unallocated entries
1600 */
55c11941
MS
1601 bp->num_ethernet_queues -= diff;
1602 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1603
51c1a580 1604 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1605 bp->num_queues);
1606 } else if (rc > 0) {
1607 /* Get by with single vector */
1608 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1609 if (rc) {
1610 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1611 rc);
1612 goto no_msix;
1613 }
1614
1615 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1616 bp->flags |= USING_SINGLE_MSIX_FLAG;
1617
55c11941
MS
1618 BNX2X_DEV_INFO("set number of queues to 1\n");
1619 bp->num_ethernet_queues = 1;
1620 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1621 } else if (rc < 0) {
51c1a580 1622 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1623 goto no_msix;
9f6c9258
DK
1624 }
1625
1626 bp->flags |= USING_MSIX_FLAG;
1627
1628 return 0;
30a5de77
DK
1629
1630no_msix:
1631 /* fall back to INTx if not enough memory */
1632 if (rc == -ENOMEM)
1633 bp->flags |= DISABLE_MSI_FLAG;
1634
1635 return rc;
9f6c9258
DK
1636}
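/* Worked example (illustrative): on a PF with CNIC support and 8 eth
 * queues, msix_vec counts 1 (slowpath) + 1 (CNIC) + 8 (eth) = 10
 * entries.  If pci_enable_msix() reports only 6 vectors are available,
 * diff = 10 - 6 = 4 and bp->num_ethernet_queues shrinks from 8 to 4;
 * if only one vector can be had, the driver falls back to
 * USING_SINGLE_MSIX_FLAG with a single ethernet queue.
 */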
1637
1638static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1639{
ca92429f 1640 int i, rc, offset = 0;
9f6c9258 1641
ad5afc89
AE
1642 /* no default status block for vf */
1643 if (IS_PF(bp)) {
1644 rc = request_irq(bp->msix_table[offset++].vector,
1645 bnx2x_msix_sp_int, 0,
1646 bp->dev->name, bp->dev);
1647 if (rc) {
1648 BNX2X_ERR("request sp irq failed\n");
1649 return -EBUSY;
1650 }
9f6c9258
DK
1651 }
1652
55c11941
MS
1653 if (CNIC_SUPPORT(bp))
1654 offset++;
1655
ec6ba945 1656 for_each_eth_queue(bp, i) {
9f6c9258
DK
1657 struct bnx2x_fastpath *fp = &bp->fp[i];
1658 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1659 bp->dev->name, i);
1660
d6214d7a 1661 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1662 bnx2x_msix_fp_int, 0, fp->name, fp);
1663 if (rc) {
ca92429f
DK
1664 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1665 bp->msix_table[offset].vector, rc);
1666 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1667 return -EBUSY;
1668 }
1669
d6214d7a 1670 offset++;
9f6c9258
DK
1671 }
1672
ec6ba945 1673 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1674 if (IS_PF(bp)) {
1675 offset = 1 + CNIC_SUPPORT(bp);
1676 netdev_info(bp->dev,
1677 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1678 bp->msix_table[0].vector,
1679 0, bp->msix_table[offset].vector,
1680 i - 1, bp->msix_table[offset + i - 1].vector);
1681 } else {
1682 offset = CNIC_SUPPORT(bp);
1683 netdev_info(bp->dev,
1684 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1685 0, bp->msix_table[offset].vector,
1686 i - 1, bp->msix_table[offset + i - 1].vector);
1687 }
9f6c9258
DK
1688 return 0;
1689}
1690
d6214d7a 1691int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1692{
1693 int rc;
1694
1695 rc = pci_enable_msi(bp->pdev);
1696 if (rc) {
51c1a580 1697 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1698 return -1;
1699 }
1700 bp->flags |= USING_MSI_FLAG;
1701
1702 return 0;
1703}
1704
1705static int bnx2x_req_irq(struct bnx2x *bp)
1706{
1707 unsigned long flags;
30a5de77 1708 unsigned int irq;
9f6c9258 1709
30a5de77 1710 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1711 flags = 0;
1712 else
1713 flags = IRQF_SHARED;
1714
30a5de77
DK
1715 if (bp->flags & USING_MSIX_FLAG)
1716 irq = bp->msix_table[0].vector;
1717 else
1718 irq = bp->pdev->irq;
1719
1720 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1721}
1722
1191cb83 1723static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1724{
1725 int rc = 0;
30a5de77
DK
1726 if (bp->flags & USING_MSIX_FLAG &&
1727 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1728 rc = bnx2x_req_msix_irqs(bp);
1729 if (rc)
1730 return rc;
1731 } else {
619c5cb6
VZ
1732 rc = bnx2x_req_irq(bp);
1733 if (rc) {
1734 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1735 return rc;
1736 }
1737 if (bp->flags & USING_MSI_FLAG) {
1738 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1739 netdev_info(bp->dev, "using MSI IRQ %d\n",
1740 bp->dev->irq);
1741 }
1742 if (bp->flags & USING_MSIX_FLAG) {
1743 bp->dev->irq = bp->msix_table[0].vector;
1744 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1745 bp->dev->irq);
619c5cb6
VZ
1746 }
1747 }
1748
1749 return 0;
1750}
1751
55c11941
MS
1752static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1753{
1754 int i;
1755
1756 for_each_rx_queue_cnic(bp, i)
1757 napi_enable(&bnx2x_fp(bp, i, napi));
1758}
1759
1191cb83 1760static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1761{
1762 int i;
1763
55c11941 1764 for_each_eth_queue(bp, i)
9f6c9258
DK
1765 napi_enable(&bnx2x_fp(bp, i, napi));
1766}
1767
55c11941
MS
1768static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1769{
1770 int i;
1771
1772 for_each_rx_queue_cnic(bp, i)
1773 napi_disable(&bnx2x_fp(bp, i, napi));
1774}
1775
1191cb83 1776static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1777{
1778 int i;
1779
55c11941 1780 for_each_eth_queue(bp, i)
9f6c9258
DK
1781 napi_disable(&bnx2x_fp(bp, i, napi));
1782}
1783
1784void bnx2x_netif_start(struct bnx2x *bp)
1785{
4b7ed897
DK
1786 if (netif_running(bp->dev)) {
1787 bnx2x_napi_enable(bp);
55c11941
MS
1788 if (CNIC_LOADED(bp))
1789 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1790 bnx2x_int_enable(bp);
1791 if (bp->state == BNX2X_STATE_OPEN)
1792 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1793 }
1794}
1795
1796void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1797{
1798 bnx2x_int_disable_sync(bp, disable_hw);
1799 bnx2x_napi_disable(bp);
55c11941
MS
1800 if (CNIC_LOADED(bp))
1801 bnx2x_napi_disable_cnic(bp);
9f6c9258 1802}
9f6c9258 1803
8307fa3e
VZ
1804u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1805{
8307fa3e 1806 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1807
55c11941 1808 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1809 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1810 u16 ether_type = ntohs(hdr->h_proto);
1811
1812 /* Skip VLAN tag if present */
1813 if (ether_type == ETH_P_8021Q) {
1814 struct vlan_ethhdr *vhdr =
1815 (struct vlan_ethhdr *)skb->data;
1816
1817 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1818 }
1819
1820 /* If ethertype is FCoE or FIP - use FCoE ring */
1821 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1822 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1823 }
55c11941 1824
cdb9d6ae 1825 /* select a non-FCoE queue */
6383c0b3 1826 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1827}
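
/* Illustrative sketch, not part of the driver: the classification that
 * bnx2x_select_queue() above performs, reduced to plain values. The helper
 * name is hypothetical; ETH_P_8021Q, ETH_P_FCOE and ETH_P_FIP are the
 * standard ethertypes. A VLAN-tagged frame is judged by its encapsulated
 * protocol, and only FCoE/FIP traffic is steered to the dedicated FCoE ring;
 * everything else falls back to the regular Tx hash.
 */
static bool example_use_fcoe_ring(u16 outer_type, u16 encap_type)
{
	u16 ether_type = outer_type;

	if (ether_type == ETH_P_8021Q)	/* skip the VLAN tag, look inside */
		ether_type = encap_type;

	return ether_type == ETH_P_FCOE || ether_type == ETH_P_FIP;
}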
1828
d6214d7a
DK
1829void bnx2x_set_num_queues(struct bnx2x *bp)
1830{
96305234 1831 /* RSS queues */
55c11941 1832 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1833
a3348722
BW
1834 /* override in STORAGE SD modes */
1835 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1836 bp->num_ethernet_queues = 1;
1837
ec6ba945 1838 /* Add special queues */
55c11941
MS
1839 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1840 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1841
1842 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1843}
1844
cdb9d6ae
VZ
1845/**
1846 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1847 *
1848 * @bp: Driver handle
1849 *
1850 * We currently support at most 16 Tx queues for each CoS, thus we will
1851 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1852 * bp->max_cos.
1853 *
1854 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1855 * index after all ETH L2 indices.
1856 *
1857 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1858 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1859 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1860 *
1861 * The proper configuration of skb->queue_mapping is handled by
1862 * bnx2x_select_queue() and __skb_tx_hash().
1863 *
1864 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1865 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1866 */
55c11941 1867static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1868{
6383c0b3 1869 int rc, tx, rx;
ec6ba945 1870
65565884 1871 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1872 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1873
6383c0b3 1874/* account for fcoe queue */
55c11941
MS
1875 if (include_cnic && !NO_FCOE(bp)) {
1876 rx++;
1877 tx++;
6383c0b3 1878 }
6383c0b3
AE
1879
1880 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1881 if (rc) {
1882 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1883 return rc;
1884 }
1885 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1886 if (rc) {
1887 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1888 return rc;
1889 }
1890
51c1a580 1891 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1892 tx, rx);
1893
ec6ba945
VZ
1894 return rc;
1895}
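
/* Illustrative sketch, not part of the driver: how the (tx, rx) counts fed
 * to netif_set_real_num_*_queues() above are derived. The helper and the
 * numbers in the comments are hypothetical, assuming 8 ETH queues,
 * max_cos = 3 and an active FCoE ring.
 */
static void example_real_num_queues(int num_eth_queues, int max_cos,
				    bool has_fcoe, int *tx, int *rx)
{
	*tx = num_eth_queues * max_cos;	/* e.g. 8 * 3 = 24 ETH Tx queues */
	*rx = num_eth_queues;		/* e.g. 8 ETH Rx queues */

	if (has_fcoe) {			/* the FCoE L2 ring adds one on each side */
		(*tx)++;		/* -> 25 */
		(*rx)++;		/* -> 9 */
	}
}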
1896
1191cb83 1897static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1898{
1899 int i;
1900
1901 for_each_queue(bp, i) {
1902 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1903 u32 mtu;
a8c94b91
VZ
1904
1905 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1906 if (IS_FCOE_IDX(i))
1907 /*
1908 * Although no IP frames are expected to arrive on
1909 * this ring, we still want to add an
1910 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1911 * overrun attack.
1912 */
e52fcb24 1913 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1914 else
e52fcb24
ED
1915 mtu = bp->dev->mtu;
1916 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1917 IP_HEADER_ALIGNMENT_PADDING +
1918 ETH_OVREHEAD +
1919 mtu +
1920 BNX2X_FW_RX_ALIGN_END;
1921 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
d46d132c
ED
1922 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1923 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1924 else
1925 fp->rx_frag_size = 0;
a8c94b91
VZ
1926 }
1927}
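
/* Illustrative sketch, not part of the driver: the sizing decision made
 * above, with hypothetical numbers. The ~100 bytes below stand in for the
 * FW alignment + IP padding + Ethernet overhead terms; with PAGE_SIZE 4096
 * a 1500-byte MTU still fits a page together with NET_SKB_PAD, so the page
 * fragment allocator can be used, while a jumbo MTU does not fit and
 * rx_frag_size stays 0 (full skb allocation).
 */
static unsigned int example_rx_frag_size(unsigned int mtu)
{
	unsigned int rx_buf_size = 100 + mtu;	/* hypothetical overhead */

	if (rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
		return rx_buf_size + NET_SKB_PAD;	/* frag allocator usable */
	return 0;					/* fall back to regular skbs */
}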
1928
1191cb83 1929static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1930{
1931 int i;
619c5cb6
VZ
1932 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1933
96305234 1934 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1935 * enabled
1936 */
5d317c6a
MS
1937 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1938 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1939 bp->fp->cl_id +
1940 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1941
1942 /*
1943 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1944 * per-port, so if explicit configuration is needed, do it only
1945 * for a PMF.
1946 *
1947 * For 57712 and newer on the other hand it's a per-function
1948 * configuration.
1949 */
5d317c6a 1950 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1951}
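
/* Illustrative sketch, not part of the driver: how the indirection table
 * built above ends up looking. ethtool_rxfh_indir_default() spreads entries
 * evenly across the Rx queues (essentially index modulo the queue count),
 * so each slot holds the base client id plus an offset cycling through
 * 0..num_eth_queues-1. The helper name and explicit table_size parameter
 * are hypothetical.
 */
static void example_fill_ind_table(u8 *table, int table_size,
				   u8 base_cl_id, u8 num_eth_queues)
{
	int i;

	for (i = 0; i < table_size; i++)
		table[i] = base_cl_id + (i % num_eth_queues);
}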
1952
96305234 1953int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1954 bool config_hash)
619c5cb6 1955{
3b603066 1956 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1957
1958 /* Although RSS is meaningless when there is a single HW queue we
1959 * still need it enabled in order to have HW Rx hash generated.
1960 *
1961 * if (!is_eth_multi(bp))
1962 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1963 */
1964
96305234 1965 params.rss_obj = rss_obj;
619c5cb6
VZ
1966
1967 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1968
96305234 1969 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1970
96305234
DK
1971 /* RSS configuration */
1972 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1975 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1976 if (rss_obj->udp_rss_v4)
1977 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1978 if (rss_obj->udp_rss_v6)
1979 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1980
96305234
DK
1981 /* Hash bits */
1982 params.rss_result_mask = MULTI_MASK;
619c5cb6 1983
5d317c6a 1984 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1985
96305234
DK
1986 if (config_hash) {
1987 /* RSS keys */
8376d0bc 1988 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1989 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1990 }
1991
1992 return bnx2x_config_rss(bp, &params);
1993}
1994
1191cb83 1995static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1996{
3b603066 1997 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1998
1999 /* Prepare parameters for function state transitions */
2000 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2001
2002 func_params.f_obj = &bp->func_obj;
2003 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2004
2005 func_params.params.hw_init.load_phase = load_code;
2006
2007 return bnx2x_func_state_change(bp, &func_params);
2008}
2009
2010/*
2011 * Cleans the objects that have internal lists without sending
2012 * ramrods. Should be run when interrupts are disabled.
2013 */
2014static void bnx2x_squeeze_objects(struct bnx2x *bp)
2015{
2016 int rc;
2017 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2018 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2019 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2020
2021 /***************** Cleanup MACs' object first *************************/
2022
2023 /* Wait for completion of requested */
2024 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2025 /* Perform a dry cleanup */
2026 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2027
2028 /* Clean ETH primary MAC */
2029 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2030 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2031 &ramrod_flags);
2032 if (rc != 0)
2033 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2034
2035 /* Cleanup UC list */
2036 vlan_mac_flags = 0;
2037 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2038 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2039 &ramrod_flags);
2040 if (rc != 0)
2041 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2042
2043 /***************** Now clean mcast object *****************************/
2044 rparam.mcast_obj = &bp->mcast_obj;
2045 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2046
2047 /* Add a DEL command... */
2048 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2049 if (rc < 0)
51c1a580
MS
2050 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2051 rc);
619c5cb6
VZ
2052
2053 /* ...and wait until all pending commands are cleared */
2054 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2055 while (rc != 0) {
2056 if (rc < 0) {
2057 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2058 rc);
2059 return;
2060 }
2061
2062 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2063 }
2064}
2065
2066#ifndef BNX2X_STOP_ON_ERROR
2067#define LOAD_ERROR_EXIT(bp, label) \
2068 do { \
2069 (bp)->state = BNX2X_STATE_ERROR; \
2070 goto label; \
2071 } while (0)
55c11941
MS
2072
2073#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2074 do { \
2075 bp->cnic_loaded = false; \
2076 goto label; \
2077 } while (0)
2078#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2079#define LOAD_ERROR_EXIT(bp, label) \
2080 do { \
2081 (bp)->state = BNX2X_STATE_ERROR; \
2082 (bp)->panic = 1; \
2083 return -EBUSY; \
2084 } while (0)
55c11941
MS
2085#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2086 do { \
2087 bp->cnic_loaded = false; \
2088 (bp)->panic = 1; \
2089 return -EBUSY; \
2090 } while (0)
2091#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2092
ad5afc89
AE
2093static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2094{
2095 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2096 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2097 return;
2098}
2099
2100static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2101{
8db573ba 2102 int num_groups, vf_headroom = 0;
ad5afc89 2103 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2104
ad5afc89
AE
2105 /* number of queues for statistics is number of eth queues + FCoE */
2106 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2107
ad5afc89
AE
2108 /* Total number of FW statistics requests =
2109 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2110 * and fcoe l2 queue) stats + num of queues (which includes another 1
2111 * for fcoe l2 queue if applicable)
2112 */
2113 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2114
8db573ba
AE
2115 /* vf stats appear in the request list, but their data is allocated by
2116 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2117 * it is used to determine where to place the vf stats queries in the
2118 * request struct
2119 */
2120 if (IS_SRIOV(bp))
6411280a 2121 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2122
ad5afc89
AE
2123 /* Request is built from stats_query_header and an array of
2124 * stats_query_cmd_group each of which contains
2125 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2126 * configured in the stats_query_header.
2127 */
2128 num_groups =
8db573ba
AE
2129 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2130 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2131 1 : 0));
2132
8db573ba
AE
2133 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2134 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2135 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2136 num_groups * sizeof(struct stats_query_cmd_group);
2137
2138 /* Data for statistics requests + stats_counter
2139 * stats_counter holds per-STORM counters that are incremented
2140 * when STORM has finished with the current request.
2141 * Memory for FCoE offloaded statistics is counted anyway,
2142 * even if it will not be sent.
2143 * VF stats are not accounted for here as the data of VF stats is stored
2144 * in memory allocated by the VF, not here.
2145 */
2146 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2147 sizeof(struct per_pf_stats) +
2148 sizeof(struct fcoe_statistics_params) +
2149 sizeof(struct per_queue_stats) * num_queue_stats +
2150 sizeof(struct stats_counter);
2151
2152 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2153 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2154
2155 /* Set shortcuts */
2156 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2157 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2158 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2159 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2160 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2161 bp->fw_stats_req_sz;
2162
2163 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2164 U64_HI(bp->fw_stats_req_mapping),
2165 U64_LO(bp->fw_stats_req_mapping));
2166 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2167 U64_HI(bp->fw_stats_data_mapping),
2168 U64_LO(bp->fw_stats_data_mapping));
2169 return 0;
2170
2171alloc_mem_err:
2172 bnx2x_free_fw_stats_mem(bp);
2173 BNX2X_ERR("Can't allocate FW stats memory\n");
2174 return -ENOMEM;
2175}
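
/* Illustrative sketch, not part of the driver: the num_groups expression
 * above is a plain ceiling division - enough command groups to hold every
 * statistics request at STATS_QUERY_CMD_COUNT rules per group. The helper
 * name is hypothetical; e.g. 11 requests with 10 rules per group need
 * 2 groups.
 */
static int example_num_stats_groups(int num_requests, int rules_per_group)
{
	return num_requests / rules_per_group +
	       (num_requests % rules_per_group ? 1 : 0);
}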
2176
2177/* send load request to mcp and analyze response */
2178static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2179{
2180 /* init fw_seq */
2181 bp->fw_seq =
2182 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2183 DRV_MSG_SEQ_NUMBER_MASK);
2184 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2185
2186 /* Get current FW pulse sequence */
2187 bp->fw_drv_pulse_wr_seq =
2188 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2189 DRV_PULSE_SEQ_MASK);
2190 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2191
2192 /* load request */
2193 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2194 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2195
2196 /* if mcp fails to respond we must abort */
2197 if (!(*load_code)) {
2198 BNX2X_ERR("MCP response failure, aborting\n");
2199 return -EBUSY;
2200 }
2201
2202 /* If mcp refused (e.g. other port is in diagnostic mode) we
2203 * must abort
2204 */
2205 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2206 BNX2X_ERR("MCP refused load request, aborting\n");
2207 return -EBUSY;
2208 }
2209 return 0;
2210}
2211
2212/* check whether another PF has already loaded FW to chip. In
2213 * virtualized environments a pf from another VM may have already
2214 * initialized the device including loading FW
2215 */
2216int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2217{
2218 /* is another pf loaded on this engine? */
2219 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2220 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2221 /* build my FW version dword */
2222 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2223 (BCM_5710_FW_MINOR_VERSION << 8) +
2224 (BCM_5710_FW_REVISION_VERSION << 16) +
2225 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2226
2227 /* read loaded FW from chip */
2228 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2229
2230 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2231 loaded_fw, my_fw);
2232
2233 /* abort nic load if version mismatch */
2234 if (my_fw != loaded_fw) {
2235 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2236 loaded_fw, my_fw);
ad5afc89
AE
2237 return -EBUSY;
2238 }
2239 }
2240 return 0;
2241}
2242
2243/* returns the "mcp load_code" according to global load_count array */
2244static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2245{
2246 int path = BP_PATH(bp);
2247
2248 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2249 path, load_count[path][0], load_count[path][1],
2250 load_count[path][2]);
2251 load_count[path][0]++;
2252 load_count[path][1 + port]++;
2253 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2254 path, load_count[path][0], load_count[path][1],
2255 load_count[path][2]);
2256 if (load_count[path][0] == 1)
2257 return FW_MSG_CODE_DRV_LOAD_COMMON;
2258 else if (load_count[path][1 + port] == 1)
2259 return FW_MSG_CODE_DRV_LOAD_PORT;
2260 else
2261 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2262}
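
/* Illustrative sketch, not part of the driver: the decision
 * bnx2x_nic_load_no_mcp() above makes when no MCP is present.
 * load_count[path][0] counts loaded functions on the whole engine,
 * [1 + port] counts them per port: the first function on the engine does
 * the common init, the first on a port does the port init, everyone else
 * only initializes its own function. Parameter names are hypothetical.
 */
static int example_no_mcp_load_code(int engine_count, int port_count)
{
	if (engine_count == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;	/* first on the engine */
	else if (port_count == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;	/* first on this port */
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;	/* function-only init */
}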
2263
2264/* mark PMF if applicable */
2265static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2266{
2267 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2269 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2270 bp->port.pmf = 1;
2271 /* We need the barrier to ensure the ordering between the
2272 * writing to bp->port.pmf here and reading it from the
2273 * bnx2x_periodic_task().
2274 */
2275 smp_mb();
2276 } else {
2277 bp->port.pmf = 0;
452427b0
YM
2278 }
2279
ad5afc89
AE
2280 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2281}
2282
2283static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2284{
2285 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2286 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2287 (bp->common.shmem2_base)) {
2288 if (SHMEM2_HAS(bp, dcc_support))
2289 SHMEM2_WR(bp, dcc_support,
2290 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2291 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2292 if (SHMEM2_HAS(bp, afex_driver_support))
2293 SHMEM2_WR(bp, afex_driver_support,
2294 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2295 }
2296
2297 /* Set AFEX default VLAN tag to an invalid value */
2298 bp->afex_def_vlan_tag = -1;
452427b0
YM
2299}
2300
1191cb83
ED
2301/**
2302 * bnx2x_bz_fp - zero content of the fastpath structure.
2303 *
2304 * @bp: driver handle
2305 * @index: fastpath index to be zeroed
2306 *
2307 * Makes sure the contents of bp->fp[index].napi are kept
2308 * intact.
2309 */
2310static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2311{
2312 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c
BW
2313 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2314
65565884 2315 int cos;
1191cb83 2316 struct napi_struct orig_napi = fp->napi;
15192a8c 2317 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2318 /* bzero bnx2x_fastpath contents */
15192a8c
BW
2319 if (bp->stats_init) {
2320 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83 2321 memset(fp, 0, sizeof(*fp));
15192a8c 2322 } else {
1191cb83
ED
2323 /* Keep Queue statistics */
2324 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2325 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2326
2327 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2328 GFP_KERNEL);
2329 if (tmp_eth_q_stats)
15192a8c 2330 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1191cb83
ED
2331 sizeof(struct bnx2x_eth_q_stats));
2332
2333 tmp_eth_q_stats_old =
2334 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2335 GFP_KERNEL);
2336 if (tmp_eth_q_stats_old)
15192a8c 2337 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1191cb83
ED
2338 sizeof(struct bnx2x_eth_q_stats_old));
2339
15192a8c 2340 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83
ED
2341 memset(fp, 0, sizeof(*fp));
2342
2343 if (tmp_eth_q_stats) {
15192a8c
BW
2344 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2345 sizeof(struct bnx2x_eth_q_stats));
1191cb83
ED
2346 kfree(tmp_eth_q_stats);
2347 }
2348
2349 if (tmp_eth_q_stats_old) {
15192a8c 2350 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1191cb83
ED
2351 sizeof(struct bnx2x_eth_q_stats_old));
2352 kfree(tmp_eth_q_stats_old);
2353 }
2354
2355 }
2356
2357 /* Restore the NAPI object as it has been already initialized */
2358 fp->napi = orig_napi;
15192a8c 2359 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2360 fp->bp = bp;
2361 fp->index = index;
2362 if (IS_ETH_FP(fp))
2363 fp->max_cos = bp->max_cos;
2364 else
2365 /* Special queues support only one CoS */
2366 fp->max_cos = 1;
2367
65565884 2368 /* Init txdata pointers */
65565884
MS
2369 if (IS_FCOE_FP(fp))
2370 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2371 if (IS_ETH_FP(fp))
2372 for_each_cos_in_tx_queue(fp, cos)
2373 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2374 BNX2X_NUM_ETH_QUEUES(bp) + index];
2375
1191cb83
ED
2376 /*
2377 * set the tpa flag for each queue. The tpa flag determines the queue
2378 * minimal size so it must be set prior to queue memory allocation
2379 */
2380 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2381 (bp->flags & GRO_ENABLE_FLAG &&
2382 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2383 if (bp->flags & TPA_ENABLE_FLAG)
2384 fp->mode = TPA_MODE_LRO;
2385 else if (bp->flags & GRO_ENABLE_FLAG)
2386 fp->mode = TPA_MODE_GRO;
2387
1191cb83
ED
2388 /* We don't want TPA on an FCoE L2 ring */
2389 if (IS_FCOE_FP(fp))
2390 fp->disable_tpa = 1;
55c11941
MS
2391}
2392
2393int bnx2x_load_cnic(struct bnx2x *bp)
2394{
2395 int i, rc, port = BP_PORT(bp);
2396
2397 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2398
2399 mutex_init(&bp->cnic_mutex);
2400
ad5afc89
AE
2401 if (IS_PF(bp)) {
2402 rc = bnx2x_alloc_mem_cnic(bp);
2403 if (rc) {
2404 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2405 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2406 }
55c11941
MS
2407 }
2408
2409 rc = bnx2x_alloc_fp_mem_cnic(bp);
2410 if (rc) {
2411 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2412 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2413 }
2414
2415 /* Update the number of queues with the cnic queues */
2416 rc = bnx2x_set_real_num_queues(bp, 1);
2417 if (rc) {
2418 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2419 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2420 }
2421
2422 /* Add all CNIC NAPI objects */
2423 bnx2x_add_all_napi_cnic(bp);
2424 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2425 bnx2x_napi_enable_cnic(bp);
2426
2427 rc = bnx2x_init_hw_func_cnic(bp);
2428 if (rc)
2429 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2430
2431 bnx2x_nic_init_cnic(bp);
2432
ad5afc89
AE
2433 if (IS_PF(bp)) {
2434 /* Enable Timer scan */
2435 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2436
2437 /* setup cnic queues */
2438 for_each_cnic_queue(bp, i) {
2439 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2440 if (rc) {
2441 BNX2X_ERR("Queue setup failed\n");
2442 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2443 }
55c11941
MS
2444 }
2445 }
2446
2447 /* Initialize Rx filter. */
2448 netif_addr_lock_bh(bp->dev);
2449 bnx2x_set_rx_mode(bp->dev);
2450 netif_addr_unlock_bh(bp->dev);
2451
2452 /* re-read iscsi info */
2453 bnx2x_get_iscsi_info(bp);
2454 bnx2x_setup_cnic_irq_info(bp);
2455 bnx2x_setup_cnic_info(bp);
2456 bp->cnic_loaded = true;
2457 if (bp->state == BNX2X_STATE_OPEN)
2458 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2459
2460
2461 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2462
2463 return 0;
2464
2465#ifndef BNX2X_STOP_ON_ERROR
2466load_error_cnic2:
2467 /* Disable Timer scan */
2468 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2469
2470load_error_cnic1:
2471 bnx2x_napi_disable_cnic(bp);
2472 /* Update the number of queues without the cnic queues */
2473 rc = bnx2x_set_real_num_queues(bp, 0);
2474 if (rc)
2475 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2476load_error_cnic0:
2477 BNX2X_ERR("CNIC-related load failed\n");
2478 bnx2x_free_fp_mem_cnic(bp);
2479 bnx2x_free_mem_cnic(bp);
2480 return rc;
2481#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2482}
2483
9f6c9258
DK
2484/* must be called with rtnl_lock */
2485int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2486{
619c5cb6 2487 int port = BP_PORT(bp);
ad5afc89 2488 int i, rc = 0, load_code = 0;
9f6c9258 2489
55c11941
MS
2490 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2491 DP(NETIF_MSG_IFUP,
2492 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2493
9f6c9258 2494#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2495 if (unlikely(bp->panic)) {
2496 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2497 return -EPERM;
51c1a580 2498 }
9f6c9258
DK
2499#endif
2500
2501 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2502
2ae17f66
VZ
2503 /* Set the initial link reported state to link down */
2504 bnx2x_acquire_phy_lock(bp);
2505 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2506 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2507 &bp->last_reported_link.link_report_flags);
2508 bnx2x_release_phy_lock(bp);
2509
ad5afc89
AE
2510 if (IS_PF(bp))
2511 /* must be called before memory allocation and HW init */
2512 bnx2x_ilt_set_info(bp);
523224a3 2513
6383c0b3
AE
2514 /*
2515 * Zero fastpath structures preserving invariants like napi (which is
2516 * allocated only once), fp index, max_cos and the bp pointer.
65565884 2517 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2518 */
51c1a580 2519 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2520 for_each_queue(bp, i)
2521 bnx2x_bz_fp(bp, i);
55c11941
MS
2522 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2523 bp->num_cnic_queues) *
2524 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2525
55c11941 2526 bp->fcoe_init = false;
6383c0b3 2527
a8c94b91
VZ
2528 /* Set the receive queues buffer size */
2529 bnx2x_set_rx_buf_size(bp);
2530
ad5afc89
AE
2531 if (IS_PF(bp)) {
2532 rc = bnx2x_alloc_mem(bp);
2533 if (rc) {
2534 BNX2X_ERR("Unable to allocate bp memory\n");
2535 return rc;
2536 }
2537 }
2538
2539 /* Allocated memory for FW statistics */
2540 if (bnx2x_alloc_fw_stats_mem(bp))
2541 LOAD_ERROR_EXIT(bp, load_error0);
2542
2543 /* need to be done after alloc mem, since it's self adjusting to amount
2544 * of memory available for RSS queues
2545 */
2546 rc = bnx2x_alloc_fp_mem(bp);
2547 if (rc) {
2548 BNX2X_ERR("Unable to allocate memory for fps\n");
2549 LOAD_ERROR_EXIT(bp, load_error0);
2550 }
d6214d7a 2551
8d9ac297
AE
2552 /* request pf to initialize status blocks */
2553 if (IS_VF(bp)) {
2554 rc = bnx2x_vfpf_init(bp);
2555 if (rc)
2556 LOAD_ERROR_EXIT(bp, load_error0);
2557 }
2558
b3b83c3f
DK
2559 /* As long as bnx2x_alloc_mem() may possibly update
2560 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2561 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2562 */
55c11941 2563 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2564 if (rc) {
ec6ba945 2565 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2566 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2567 }
2568
6383c0b3
AE
2569 /* configure multi cos mappings in kernel.
2570 * this configuration may be overridden by a multi-class queue discipline
2571 * or by a dcbx negotiation result.
2572 */
2573 bnx2x_setup_tc(bp->dev, bp->max_cos);
2574
26614ba5
MS
2575 /* Add all NAPI objects */
2576 bnx2x_add_all_napi(bp);
55c11941 2577 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2578 bnx2x_napi_enable(bp);
2579
ad5afc89
AE
2580 if (IS_PF(bp)) {
2581 /* set pf load just before approaching the MCP */
2582 bnx2x_set_pf_load(bp);
2583
2584 /* if mcp exists send load request and analyze response */
2585 if (!BP_NOMCP(bp)) {
2586 /* attempt to load pf */
2587 rc = bnx2x_nic_load_request(bp, &load_code);
2588 if (rc)
2589 LOAD_ERROR_EXIT(bp, load_error1);
2590
2591 /* what did mcp say? */
2592 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2593 if (rc) {
2594 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2595 LOAD_ERROR_EXIT(bp, load_error2);
2596 }
ad5afc89
AE
2597 } else {
2598 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2599 }
9f6c9258 2600
ad5afc89
AE
2601 /* mark pmf if applicable */
2602 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2603
ad5afc89
AE
2604 /* Init Function state controlling object */
2605 bnx2x__init_func_obj(bp);
6383c0b3 2606
ad5afc89
AE
2607 /* Initialize HW */
2608 rc = bnx2x_init_hw(bp, load_code);
2609 if (rc) {
2610 BNX2X_ERR("HW init failed, aborting\n");
2611 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2612 LOAD_ERROR_EXIT(bp, load_error2);
2613 }
9f6c9258
DK
2614 }
2615
d6214d7a
DK
2616 /* Connect to IRQs */
2617 rc = bnx2x_setup_irqs(bp);
523224a3 2618 if (rc) {
ad5afc89
AE
2619 BNX2X_ERR("setup irqs failed\n");
2620 if (IS_PF(bp))
2621 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2622 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2623 }
2624
9f6c9258
DK
2625 /* Setup NIC internals and enable interrupts */
2626 bnx2x_nic_init(bp, load_code);
2627
619c5cb6 2628 /* Init per-function objects */
ad5afc89
AE
2629 if (IS_PF(bp)) {
2630 bnx2x_init_bp_objs(bp);
b56e9670 2631 bnx2x_iov_nic_init(bp);
a3348722 2632
ad5afc89
AE
2633 /* Set AFEX default VLAN tag to an invalid value */
2634 bp->afex_def_vlan_tag = -1;
2635 bnx2x_nic_load_afex_dcc(bp, load_code);
2636 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2637 rc = bnx2x_func_start(bp);
2638 if (rc) {
2639 BNX2X_ERR("Function start failed!\n");
2640 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2641
619c5cb6 2642 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2643 }
9f6c9258 2644
ad5afc89
AE
2645 /* Send LOAD_DONE command to MCP */
2646 if (!BP_NOMCP(bp)) {
2647 load_code = bnx2x_fw_command(bp,
2648 DRV_MSG_CODE_LOAD_DONE, 0);
2649 if (!load_code) {
2650 BNX2X_ERR("MCP response failure, aborting\n");
2651 rc = -EBUSY;
2652 LOAD_ERROR_EXIT(bp, load_error3);
2653 }
2654 }
9f6c9258 2655
ad5afc89
AE
2656 /* setup the leading queue */
2657 rc = bnx2x_setup_leading(bp);
51c1a580 2658 if (rc) {
ad5afc89 2659 BNX2X_ERR("Setup leading failed!\n");
55c11941 2660 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2661 }
523224a3 2662
ad5afc89
AE
2663 /* set up the rest of the queues */
2664 for_each_nondefault_eth_queue(bp, i) {
2665 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2666 if (rc) {
2667 BNX2X_ERR("Queue setup failed\n");
2668 LOAD_ERROR_EXIT(bp, load_error3);
2669 }
2670 }
2671
2672 /* setup rss */
2673 rc = bnx2x_init_rss_pf(bp);
2674 if (rc) {
2675 BNX2X_ERR("PF RSS init failed\n");
2676 LOAD_ERROR_EXIT(bp, load_error3);
2677 }
8d9ac297
AE
2678
2679 } else { /* vf */
2680 for_each_eth_queue(bp, i) {
2681 rc = bnx2x_vfpf_setup_q(bp, i);
2682 if (rc) {
2683 BNX2X_ERR("Queue setup failed\n");
2684 LOAD_ERROR_EXIT(bp, load_error3);
2685 }
2686 }
51c1a580 2687 }
619c5cb6 2688
523224a3
DK
2689 /* Now that clients are configured we are ready to work */
2690 bp->state = BNX2X_STATE_OPEN;
2691
619c5cb6 2692 /* Configure a ucast MAC */
ad5afc89
AE
2693 if (IS_PF(bp))
2694 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297
AE
2695 else /* vf */
2696 rc = bnx2x_vfpf_set_mac(bp);
51c1a580
MS
2697 if (rc) {
2698 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2699 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2700 }
6e30dd4e 2701
ad5afc89 2702 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2703 bnx2x_update_max_mf_config(bp, bp->pending_max);
2704 bp->pending_max = 0;
2705 }
2706
ad5afc89
AE
2707 if (bp->port.pmf) {
2708 rc = bnx2x_initial_phy_init(bp, load_mode);
2709 if (rc)
2710 LOAD_ERROR_EXIT(bp, load_error3);
2711 }
c63da990 2712 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2713
619c5cb6
VZ
2714 /* Start fast path */
2715
2716 /* Initialize Rx filter. */
2717 netif_addr_lock_bh(bp->dev);
6e30dd4e 2718 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2719 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2720
619c5cb6 2721 /* Start the Tx */
9f6c9258
DK
2722 switch (load_mode) {
2723 case LOAD_NORMAL:
523224a3
DK
2724 /* Tx queues should only be re-enabled */
2725 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2726 break;
2727
2728 case LOAD_OPEN:
2729 netif_tx_start_all_queues(bp->dev);
523224a3 2730 smp_mb__after_clear_bit();
9f6c9258
DK
2731 break;
2732
2733 case LOAD_DIAG:
8970b2e4 2734 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2735 bp->state = BNX2X_STATE_DIAG;
2736 break;
2737
2738 default:
2739 break;
2740 }
2741
00253a8c 2742 if (bp->port.pmf)
4c704899 2743 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2744 else
9f6c9258
DK
2745 bnx2x__link_status_update(bp);
2746
2747 /* start the timer */
2748 mod_timer(&bp->timer, jiffies + bp->current_interval);
2749
55c11941
MS
2750 if (CNIC_ENABLED(bp))
2751 bnx2x_load_cnic(bp);
9f6c9258 2752
ad5afc89
AE
2753 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2754 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2755 u32 val;
2756 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2757 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2758 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2759 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2760 }
2761
619c5cb6 2762 /* Wait for all pending SP commands to complete */
ad5afc89 2763 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2764 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2765 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2766 return -EBUSY;
2767 }
6891dd25 2768
9876879f
BW
2769 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2770 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2771 bnx2x_dcbx_init(bp, false);
2772
55c11941
MS
2773 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2774
9f6c9258
DK
2775 return 0;
2776
619c5cb6 2777#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2778load_error3:
ad5afc89
AE
2779 if (IS_PF(bp)) {
2780 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2781
ad5afc89
AE
2782 /* Clean queueable objects */
2783 bnx2x_squeeze_objects(bp);
2784 }
619c5cb6 2785
9f6c9258
DK
2786 /* Free SKBs, SGEs, TPA pool and driver internals */
2787 bnx2x_free_skbs(bp);
ec6ba945 2788 for_each_rx_queue(bp, i)
9f6c9258 2789 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2790
9f6c9258 2791 /* Release IRQs */
d6214d7a
DK
2792 bnx2x_free_irq(bp);
2793load_error2:
ad5afc89 2794 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2795 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2796 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2797 }
2798
2799 bp->port.pmf = 0;
9f6c9258
DK
2800load_error1:
2801 bnx2x_napi_disable(bp);
ad5afc89 2802
889b9af3 2803 /* clear pf_load status, as it was already set */
ad5afc89
AE
2804 if (IS_PF(bp))
2805 bnx2x_clear_pf_load(bp);
d6214d7a 2806load_error0:
ad5afc89
AE
2807 bnx2x_free_fp_mem(bp);
2808 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2809 bnx2x_free_mem(bp);
2810
2811 return rc;
619c5cb6 2812#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2813}
2814
ad5afc89
AE
2815static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2816{
2817 u8 rc = 0, cos, i;
2818
2819 /* Wait until tx fastpath tasks complete */
2820 for_each_tx_queue(bp, i) {
2821 struct bnx2x_fastpath *fp = &bp->fp[i];
2822
2823 for_each_cos_in_tx_queue(fp, cos)
2824 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2825 if (rc)
2826 return rc;
2827 }
2828 return 0;
2829}
2830
9f6c9258 2831/* must be called with rtnl_lock */
5d07d868 2832int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2833{
2834 int i;
c9ee9206
VZ
2835 bool global = false;
2836
55c11941
MS
2837 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2838
9ce392d4 2839 /* mark driver is unloaded in shmem2 */
ad5afc89 2840 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2841 u32 val;
2842 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2843 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2844 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2845 }
2846
ad5afc89
AE
2847 if (IS_PF(bp) &&
2848 (bp->state == BNX2X_STATE_CLOSED ||
2849 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2850 /* We can get here if the driver has been unloaded
2851 * during parity error recovery and is either waiting for a
2852 * leader to complete or for other functions to unload and
2853 * then ifdown has been issued. In this case we want to
2854 * unload and let other functions complete a recovery
2855 * process.
2856 */
9f6c9258
DK
2857 bp->recovery_state = BNX2X_RECOVERY_DONE;
2858 bp->is_leader = 0;
c9ee9206
VZ
2859 bnx2x_release_leader_lock(bp);
2860 smp_mb();
2861
51c1a580
MS
2862 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2863 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2864 return -EINVAL;
2865 }
2866
87b7ba3d
VZ
2867 /*
2868 * It's important to set bp->state to a value different from
2869 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2870 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2871 */
2872 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2873 smp_mb();
2874
55c11941
MS
2875 if (CNIC_LOADED(bp))
2876 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2877
9505ee37
VZ
2878 /* Stop Tx */
2879 bnx2x_tx_disable(bp);
65565884 2880 netdev_reset_tc(bp->dev);
9505ee37 2881
9f6c9258 2882 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2883
9f6c9258 2884 del_timer_sync(&bp->timer);
f85582f8 2885
ad5afc89
AE
2886 if (IS_PF(bp)) {
2887 /* Set ALWAYS_ALIVE bit in shmem */
2888 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2889 bnx2x_drv_pulse(bp);
2890 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2891 bnx2x_save_statistics(bp);
2892 }
9f6c9258 2893
ad5afc89
AE
2894 /* wait till consumers catch up with producers in all queues */
2895 bnx2x_drain_tx_queues(bp);
9f6c9258 2896
9b176b6b
AE
2897 /* if VF indicate to PF this function is going down (PF will delete sp
2898 * elements and clear initializations
2899 */
2900 if (IS_VF(bp))
2901 bnx2x_vfpf_close_vf(bp);
2902 else if (unload_mode != UNLOAD_RECOVERY)
2903 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2904 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2905 else {
c9ee9206
VZ
2906 /* Send the UNLOAD_REQUEST to the MCP */
2907 bnx2x_send_unload_req(bp, unload_mode);
2908
2909 /*
2910 * Prevent transactions to host from the functions on the
2911 * engine that doesn't reset global blocks in case of global
2912 * attention once global blocks are reset and gates are opened
2913 * (the engine whose leader will perform the recovery
2914 * last).
2915 */
2916 if (!CHIP_IS_E1x(bp))
2917 bnx2x_pf_disable(bp);
2918
2919 /* Disable HW interrupts, NAPI */
523224a3 2920 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2921 /* Delete all NAPI objects */
2922 bnx2x_del_all_napi(bp);
55c11941
MS
2923 if (CNIC_LOADED(bp))
2924 bnx2x_del_all_napi_cnic(bp);
523224a3 2925 /* Release IRQs */
d6214d7a 2926 bnx2x_free_irq(bp);
c9ee9206
VZ
2927
2928 /* Report UNLOAD_DONE to MCP */
5d07d868 2929 bnx2x_send_unload_done(bp, false);
523224a3 2930 }
9f6c9258 2931
619c5cb6
VZ
2932 /*
2933 * At this stage no more interrupts will arrive, so we may safely clean
2934 * the queueable objects here in case they failed to get cleaned so far.
2935 */
ad5afc89
AE
2936 if (IS_PF(bp))
2937 bnx2x_squeeze_objects(bp);
619c5cb6 2938
79616895
VZ
2939 /* There should be no more pending SP commands at this stage */
2940 bp->sp_state = 0;
2941
9f6c9258
DK
2942 bp->port.pmf = 0;
2943
2944 /* Free SKBs, SGEs, TPA pool and driver internals */
2945 bnx2x_free_skbs(bp);
55c11941
MS
2946 if (CNIC_LOADED(bp))
2947 bnx2x_free_skbs_cnic(bp);
ec6ba945 2948 for_each_rx_queue(bp, i)
9f6c9258 2949 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2950
ad5afc89
AE
2951 bnx2x_free_fp_mem(bp);
2952 if (CNIC_LOADED(bp))
55c11941 2953 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2954
ad5afc89
AE
2955 if (IS_PF(bp)) {
2956 bnx2x_free_mem(bp);
2957 if (CNIC_LOADED(bp))
2958 bnx2x_free_mem_cnic(bp);
2959 }
9f6c9258 2960 bp->state = BNX2X_STATE_CLOSED;
55c11941 2961 bp->cnic_loaded = false;
9f6c9258 2962
c9ee9206
VZ
2963 /* Check if there are pending parity attentions. If there are - set
2964 * RECOVERY_IN_PROGRESS.
2965 */
ad5afc89 2966 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2967 bnx2x_set_reset_in_progress(bp);
2968
2969 /* Set RESET_IS_GLOBAL if needed */
2970 if (global)
2971 bnx2x_set_reset_global(bp);
2972 }
2973
2974
9f6c9258
DK
2975 /* The last driver must disable a "close the gate" if there is no
2976 * parity attention or "process kill" pending.
2977 */
ad5afc89
AE
2978 if (IS_PF(bp) &&
2979 !bnx2x_clear_pf_load(bp) &&
2980 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2981 bnx2x_disable_close_the_gate(bp);
2982
55c11941
MS
2983 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2984
9f6c9258
DK
2985 return 0;
2986}
f85582f8 2987
9f6c9258
DK
2988int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2989{
2990 u16 pmcsr;
2991
adf5f6a1
DK
2992 /* If there is no power capability, silently succeed */
2993 if (!bp->pm_cap) {
51c1a580 2994 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2995 return 0;
2996 }
2997
9f6c9258
DK
2998 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2999
3000 switch (state) {
3001 case PCI_D0:
3002 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3003 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3004 PCI_PM_CTRL_PME_STATUS));
3005
3006 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3007 /* delay required during transition out of D3hot */
3008 msleep(20);
3009 break;
3010
3011 case PCI_D3hot:
3012 /* If there are other clients above, don't
3013 shut down the power */
3014 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3015 return 0;
3016 /* Don't shut down the power for emulation and FPGA */
3017 if (CHIP_REV_IS_SLOW(bp))
3018 return 0;
3019
3020 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3021 pmcsr |= 3;
3022
3023 if (bp->wol)
3024 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3025
3026 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3027 pmcsr);
3028
3029 /* No more memory access after this point until
3030 * device is brought back to D0.
3031 */
3032 break;
3033
3034 default:
51c1a580 3035 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3036 return -EINVAL;
3037 }
3038 return 0;
3039}
3040
9f6c9258
DK
3041/*
3042 * net_device service functions
3043 */
d6214d7a 3044int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3045{
3046 int work_done = 0;
6383c0b3 3047 u8 cos;
9f6c9258
DK
3048 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3049 napi);
3050 struct bnx2x *bp = fp->bp;
3051
3052 while (1) {
3053#ifdef BNX2X_STOP_ON_ERROR
3054 if (unlikely(bp->panic)) {
3055 napi_complete(napi);
3056 return 0;
3057 }
3058#endif
3059
6383c0b3 3060 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3061 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3062 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3063
9f6c9258
DK
3064 if (bnx2x_has_rx_work(fp)) {
3065 work_done += bnx2x_rx_int(fp, budget - work_done);
3066
3067 /* must not complete if we consumed full budget */
3068 if (work_done >= budget)
3069 break;
3070 }
3071
3072 /* Fall out from the NAPI loop if needed */
3073 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3074
ec6ba945
VZ
3075 /* No need to update SB for FCoE L2 ring as long as
3076 * it's connected to the default SB and the SB
3077 * has been updated when NAPI was scheduled.
3078 */
3079 if (IS_FCOE_FP(fp)) {
3080 napi_complete(napi);
3081 break;
3082 }
9f6c9258 3083 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3084 /* bnx2x_has_rx_work() reads the status block,
3085 * thus we need to ensure that status block indices
3086 * have been actually read (bnx2x_update_fpsb_idx)
3087 * prior to this check (bnx2x_has_rx_work) so that
3088 * we won't write the "newer" value of the status block
3089 * to IGU (if there was a DMA right after
3090 * bnx2x_has_rx_work and if there is no rmb, the memory
3091 * reading (bnx2x_update_fpsb_idx) may be postponed
3092 * to right before bnx2x_ack_sb). In this case there
3093 * will never be another interrupt until there is
3094 * another update of the status block, while there
3095 * is still unhandled work.
3096 */
9f6c9258
DK
3097 rmb();
3098
3099 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3100 napi_complete(napi);
3101 /* Re-enable interrupts */
51c1a580 3102 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3103 "Update index to %d\n", fp->fp_hc_idx);
3104 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3105 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3106 IGU_INT_ENABLE, 1);
3107 break;
3108 }
3109 }
3110 }
3111
3112 return work_done;
3113}
3114
9f6c9258
DK
3115/* we split the first BD into header and data BDs
3116 * to ease the pain of our fellow microcode engineers;
3117 * we use one mapping for both BDs
9f6c9258
DK
3118 */
3119static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 3120 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
3121 struct sw_tx_bd *tx_buf,
3122 struct eth_tx_start_bd **tx_bd, u16 hlen,
3123 u16 bd_prod, int nbd)
3124{
3125 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3126 struct eth_tx_bd *d_tx_bd;
3127 dma_addr_t mapping;
3128 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3129
3130 /* first fix first BD */
3131 h_tx_bd->nbd = cpu_to_le16(nbd);
3132 h_tx_bd->nbytes = cpu_to_le16(hlen);
3133
51c1a580
MS
3134 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3135 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
3136
3137 /* now get a new data BD
3138 * (after the pbd) and fill it */
3139 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3140 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3141
3142 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3143 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3144
3145 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3146 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3147 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3148
3149 /* this marks the BD as one that has no individual mapping */
3150 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3151
3152 DP(NETIF_MSG_TX_QUEUED,
3153 "TSO split data size is %d (%x:%x)\n",
3154 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3155
3156 /* update tx_bd */
3157 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3158
3159 return bd_prod;
3160}
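
/* Illustrative sketch, not part of the driver: the arithmetic behind
 * bnx2x_tx_split() above, with hypothetical numbers. A 1514-byte first BD
 * with 54 bytes of headers becomes a 54-byte header BD plus a 1460-byte
 * data BD that reuses the same DMA mapping at offset 54.
 */
static void example_tx_split(unsigned int old_len, unsigned int hlen,
			     unsigned int *hdr_bd_len,
			     unsigned int *data_bd_len,
			     unsigned int *data_bd_offset)
{
	*hdr_bd_len = hlen;		/* e.g. 54 */
	*data_bd_len = old_len - hlen;	/* e.g. 1514 - 54 = 1460 */
	*data_bd_offset = hlen;		/* data BD maps at mapping + hlen */
}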
3161
3162static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3163{
3164 if (fix > 0)
3165 csum = (u16) ~csum_fold(csum_sub(csum,
3166 csum_partial(t_header - fix, fix, 0)));
3167
3168 else if (fix < 0)
3169 csum = (u16) ~csum_fold(csum_add(csum,
3170 csum_partial(t_header, -fix, 0)));
3171
3172 return swab16(csum);
3173}
3174
3175static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3176{
3177 u32 rc;
3178
3179 if (skb->ip_summed != CHECKSUM_PARTIAL)
3180 rc = XMIT_PLAIN;
3181
3182 else {
d0d9d8ef 3183 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
3184 rc = XMIT_CSUM_V6;
3185 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3186 rc |= XMIT_CSUM_TCP;
3187
3188 } else {
3189 rc = XMIT_CSUM_V4;
3190 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3191 rc |= XMIT_CSUM_TCP;
3192 }
3193 }
3194
5892b9e9
VZ
3195 if (skb_is_gso_v6(skb))
3196 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3197 else if (skb_is_gso(skb))
3198 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
3199
3200 return rc;
3201}
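
/* Illustrative sketch, not part of the driver: the flag combination
 * bnx2x_xmit_type() above produces for the common case of an IPv4 TCP
 * TSO packet with CHECKSUM_PARTIAL - the checksum and GSO bits are OR'ed
 * together. The helper name is hypothetical.
 */
static u32 example_xmit_type_ipv4_tso(void)
{
	return XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4;
}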
3202
3203#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3204/* check if the packet requires linearization (packet is too fragmented);
3205 no need to check fragmentation if page size > 8K (there will be no
3206 violation of FW restrictions) */
3207static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3208 u32 xmit_type)
3209{
3210 int to_copy = 0;
3211 int hlen = 0;
3212 int first_bd_sz = 0;
3213
3214 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3215 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3216
3217 if (xmit_type & XMIT_GSO) {
3218 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3219 /* Check if LSO packet needs to be copied:
3220 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3221 int wnd_size = MAX_FETCH_BD - 3;
3222 /* Number of windows to check */
3223 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3224 int wnd_idx = 0;
3225 int frag_idx = 0;
3226 u32 wnd_sum = 0;
3227
3228 /* Headers length */
3229 hlen = (int)(skb_transport_header(skb) - skb->data) +
3230 tcp_hdrlen(skb);
3231
3232 /* Amount of data (w/o headers) on linear part of SKB */
3233 first_bd_sz = skb_headlen(skb) - hlen;
3234
3235 wnd_sum = first_bd_sz;
3236
3237 /* Calculate the first sum - it's special */
3238 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3239 wnd_sum +=
9e903e08 3240 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3241
3242 /* If there was data on linear skb data - check it */
3243 if (first_bd_sz > 0) {
3244 if (unlikely(wnd_sum < lso_mss)) {
3245 to_copy = 1;
3246 goto exit_lbl;
3247 }
3248
3249 wnd_sum -= first_bd_sz;
3250 }
3251
3252 /* Others are easier: run through the frag list and
3253 check all windows */
3254 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3255 wnd_sum +=
9e903e08 3256 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3257
3258 if (unlikely(wnd_sum < lso_mss)) {
3259 to_copy = 1;
3260 break;
3261 }
3262 wnd_sum -=
9e903e08 3263 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3264 }
3265 } else {
3266 /* in the non-LSO case a too fragmented packet should
3267 always be linearized */
3268 to_copy = 1;
3269 }
3270 }
3271
3272exit_lbl:
3273 if (unlikely(to_copy))
3274 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3275 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3276 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3277 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3278
3279 return to_copy;
3280}
3281#endif
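
/* Illustrative sketch, not part of the driver: the rule bnx2x_pkt_req_lin()
 * above enforces for LSO packets - every window of wnd_size consecutive BDs
 * (linear data followed by the frags) must carry at least gso_size bytes,
 * otherwise the skb has to be linearized. Parameter names and the explicit
 * length array are hypothetical; the driver walks the skb frags instead.
 */
static bool example_needs_linearization(const unsigned int *bd_len,
					int num_bds, int wnd_size,
					unsigned int gso_size)
{
	int start;

	for (start = 0; start + wnd_size <= num_bds; start++) {
		unsigned int wnd_sum = 0;
		int i;

		for (i = start; i < start + wnd_size; i++)
			wnd_sum += bd_len[i];
		if (wnd_sum < gso_size)
			return true;	/* too fragmented - linearize */
	}
	return false;
}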
3282
2297a2da
VZ
3283static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3284 u32 xmit_type)
f2e0899f 3285{
2297a2da
VZ
3286 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3287 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3288 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
3289 if ((xmit_type & XMIT_GSO_V6) &&
3290 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 3291 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3292}
3293
3294/**
e8920674 3295 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3296 *
e8920674
DK
3297 * @skb: packet skb
3298 * @pbd: parse BD
3299 * @xmit_type: xmit flags
f2e0899f
DK
3300 */
3301static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3302 struct eth_tx_parse_bd_e1x *pbd,
3303 u32 xmit_type)
3304{
3305 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3306 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3307 pbd->tcp_flags = pbd_tcp_flags(skb);
3308
3309 if (xmit_type & XMIT_GSO_V4) {
3310 pbd->ip_id = swab16(ip_hdr(skb)->id);
3311 pbd->tcp_pseudo_csum =
3312 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3313 ip_hdr(skb)->daddr,
3314 0, IPPROTO_TCP, 0));
3315
3316 } else
3317 pbd->tcp_pseudo_csum =
3318 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3319 &ipv6_hdr(skb)->daddr,
3320 0, IPPROTO_TCP, 0));
3321
3322 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3323}
f85582f8 3324
f2e0899f 3325/**
e8920674 3326 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3327 *
e8920674
DK
3328 * @bp: driver handle
3329 * @skb: packet skb
3330 * @parsing_data: data to be updated
3331 * @xmit_type: xmit flags
f2e0899f 3332 *
e8920674 3333 * 57712 related
f2e0899f
DK
3334 */
3335static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2de67439 3336 u32 *parsing_data, u32 xmit_type)
f2e0899f 3337{
e39aece7 3338 *parsing_data |=
2de67439
YM
3339 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3340 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3341 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 3342
e39aece7
VZ
3343 if (xmit_type & XMIT_CSUM_TCP) {
3344 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3345 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3346 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3347
e39aece7
VZ
3348 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3349 } else
3350 /* We support checksum offload for TCP and UDP only.
3351 * No need to pass the UDP header length - it's a constant.
3352 */
3353 return skb_transport_header(skb) +
3354 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3355}
3356
93ef5c02
DK
3357static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3358 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3359{
93ef5c02
DK
3360 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3361
3362 if (xmit_type & XMIT_CSUM_V4)
3363 tx_start_bd->bd_flags.as_bitfield |=
3364 ETH_TX_BD_FLAGS_IP_CSUM;
3365 else
3366 tx_start_bd->bd_flags.as_bitfield |=
3367 ETH_TX_BD_FLAGS_IPV6;
3368
3369 if (!(xmit_type & XMIT_CSUM_TCP))
3370 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3371}
3372
f2e0899f 3373/**
e8920674 3374 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3375 *
e8920674
DK
3376 * @bp: driver handle
3377 * @skb: packet skb
3378 * @pbd: parse BD to be updated
3379 * @xmit_type: xmit flags
f2e0899f
DK
3380 */
3381static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3382 struct eth_tx_parse_bd_e1x *pbd,
3383 u32 xmit_type)
3384{
e39aece7 3385 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3386
3387 /* for now NS flag is not used in Linux */
3388 pbd->global_data =
3389 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3390 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3391
3392 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3393 skb_network_header(skb)) >> 1;
f2e0899f 3394
e39aece7
VZ
3395 hlen += pbd->ip_hlen_w;
3396
3397 /* We support checksum offload for TCP and UDP only */
3398 if (xmit_type & XMIT_CSUM_TCP)
3399 hlen += tcp_hdrlen(skb) / 2;
3400 else
3401 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3402
3403 pbd->total_hlen_w = cpu_to_le16(hlen);
3404 hlen = hlen*2;
3405
3406 if (xmit_type & XMIT_CSUM_TCP) {
3407 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3408
3409 } else {
3410 s8 fix = SKB_CS_OFF(skb); /* signed! */
3411
3412 DP(NETIF_MSG_TX_QUEUED,
3413 "hlen %d fix %d csum before fix %x\n",
3414 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3415
3416 /* HW bug: fixup the CSUM */
3417 pbd->tcp_pseudo_csum =
3418 bnx2x_csum_fix(skb_transport_header(skb),
3419 SKB_CS(skb), fix);
3420
3421 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3422 pbd->tcp_pseudo_csum);
3423 }
3424
3425 return hlen;
3426}
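
/* Illustrative sketch, not part of the driver: the header lengths written
 * into the parse BD above are counted in 16-bit words. For a hypothetical
 * untagged IPv4 TCP packet with 14 + 20 + 20 byte headers this gives
 * 7 + 10 + 10 = 27 words, i.e. total_hlen_w = 27 and 54 bytes returned to
 * the caller.
 */
static u16 example_pbd_hlen_words(u16 eth_hdr_len, u16 ip_hdr_len,
				  u16 tcp_hdr_len)
{
	return (eth_hdr_len + ip_hdr_len + tcp_hdr_len) >> 1; /* bytes -> words */
}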
f85582f8 3427
9f6c9258
DK
3428/* called with netif_tx_lock
3429 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3430 * netif_wake_queue()
3431 */
3432netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3433{
3434 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3435
9f6c9258 3436 struct netdev_queue *txq;
6383c0b3 3437 struct bnx2x_fp_txdata *txdata;
9f6c9258 3438 struct sw_tx_bd *tx_buf;
619c5cb6 3439 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3440 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3441 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3442 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 3443 u32 pbd_e2_parsing_data = 0;
9f6c9258 3444 u16 pkt_prod, bd_prod;
65565884 3445 int nbd, txq_index;
9f6c9258
DK
3446 dma_addr_t mapping;
3447 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3448 int i;
3449 u8 hlen = 0;
3450 __le16 pkt_size = 0;
3451 struct ethhdr *eth;
3452 u8 mac_type = UNICAST_ADDRESS;
3453
3454#ifdef BNX2X_STOP_ON_ERROR
3455 if (unlikely(bp->panic))
3456 return NETDEV_TX_BUSY;
3457#endif
3458
6383c0b3
AE
3459 txq_index = skb_get_queue_mapping(skb);
3460 txq = netdev_get_tx_queue(dev, txq_index);
3461
55c11941 3462 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3463
65565884 3464 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3465
3466 /* enable this debug print to view the transmission queue being used
51c1a580 3467 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3468 txq_index, fp_index, txdata_index); */
9f6c9258 3469
6383c0b3 3470 /* enable this debug print to view the transmission details
51c1a580
MS
3471 DP(NETIF_MSG_TX_QUEUED,
3472 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3473 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3474
6383c0b3 3475 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3476 skb_shinfo(skb)->nr_frags +
3477 BDS_PER_TX_PKT +
3478 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3479 /* Handle special storage cases separately */
c96bdc0c
DK
3480 if (txdata->tx_ring_size == 0) {
3481 struct bnx2x_eth_q_stats *q_stats =
3482 bnx2x_fp_qstats(bp, txdata->parent_fp);
3483 q_stats->driver_filtered_tx_pkt++;
3484 dev_kfree_skb(skb);
3485 return NETDEV_TX_OK;
3486 }
2de67439
YM
3487 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3488 netif_tx_stop_queue(txq);
c96bdc0c 3489 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3490
9f6c9258
DK
3491 return NETDEV_TX_BUSY;
3492 }
3493
51c1a580
MS
3494 DP(NETIF_MSG_TX_QUEUED,
3495 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 3496 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
3497 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3498
3499 eth = (struct ethhdr *)skb->data;
3500
3501 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3502 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3503 if (is_broadcast_ether_addr(eth->h_dest))
3504 mac_type = BROADCAST_ADDRESS;
3505 else
3506 mac_type = MULTICAST_ADDRESS;
3507 }
3508
3509#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3510 /* First, check if we need to linearize the skb (due to FW
3511 restrictions). No need to check fragmentation if page size > 8K
3512 (there will be no violation of FW restrictions) */
3513 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3514 /* Statistics of linearization */
3515 bp->lin_cnt++;
3516 if (skb_linearize(skb) != 0) {
51c1a580
MS
3517 DP(NETIF_MSG_TX_QUEUED,
3518 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3519 dev_kfree_skb_any(skb);
3520 return NETDEV_TX_OK;
3521 }
3522 }
3523#endif
619c5cb6
VZ
3524 /* Map skb linear data for DMA */
3525 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3526 skb_headlen(skb), DMA_TO_DEVICE);
3527 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3528 DP(NETIF_MSG_TX_QUEUED,
3529 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3530 dev_kfree_skb_any(skb);
3531 return NETDEV_TX_OK;
3532 }
9f6c9258
DK
3533 /*
3534 Please read carefully. First we use one BD which we mark as start,
3535 then we have a parsing info BD (used for TSO or xsum),
3536 and only then we have the rest of the TSO BDs.
3537 (don't forget to mark the last one as last,
3538 and to unmap only AFTER you write to the BD ...)
3539 And above all, all pbd sizes are in words - NOT DWORDS!
3540 */
3541
619c5cb6
VZ
3542 /* get current pkt produced now - advance it just before sending packet
3543 * since mapping of pages may fail and cause packet to be dropped
3544 */
6383c0b3
AE
3545 pkt_prod = txdata->tx_pkt_prod;
3546 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3547
619c5cb6
VZ
3548 /* get a tx_buf and first BD
3549 * tx_start_bd may be changed during SPLIT,
3550 * but first_bd will always stay first
3551 */
6383c0b3
AE
3552 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3553 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3554 first_bd = tx_start_bd;
9f6c9258
DK
3555
3556 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
96bed4b9
YM
3557 SET_FLAG(tx_start_bd->general_data,
3558 ETH_TX_START_BD_PARSE_NBDS,
3559 0);
f85582f8 3560
9f6c9258 3561 /* header nbd */
f85582f8 3562 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
3563
3564 /* remember the first BD of the packet */
6383c0b3 3565 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3566 tx_buf->skb = skb;
3567 tx_buf->flags = 0;
3568
3569 DP(NETIF_MSG_TX_QUEUED,
3570 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3571 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3572
eab6d18d 3573 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3574 tx_start_bd->vlan_or_ethertype =
3575 cpu_to_le16(vlan_tx_tag_get(skb));
3576 tx_start_bd->bd_flags.as_bitfield |=
3577 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3578 } else {
3579 /* when transmitting in a vf, start bd must hold the ethertype
3580 * for fw to enforce it
3581 */
823e1d90 3582#ifndef BNX2X_STOP_ON_ERROR
dc1ba591 3583 if (IS_VF(bp)) {
823e1d90 3584#endif
dc1ba591
AE
3585 tx_start_bd->vlan_or_ethertype =
3586 cpu_to_le16(ntohs(eth->h_proto));
823e1d90 3587#ifndef BNX2X_STOP_ON_ERROR
dc1ba591
AE
3588 } else {
3589 /* used by FW for packet accounting */
3590 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3591 }
823e1d90 3592#endif
dc1ba591 3593 }
9f6c9258
DK
3594
3595 /* turn on parsing and get a BD */
3596 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3597
93ef5c02
DK
3598 if (xmit_type & XMIT_CSUM)
3599 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3600
619c5cb6 3601 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3602 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
3603 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3604 /* Set PBD in checksum offload case */
3605 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
3606 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3607 &pbd_e2_parsing_data,
3608 xmit_type);
dc1ba591
AE
3609
3610 if (IS_MF_SI(bp) || IS_VF(bp)) {
3611 /* fill in the MAC addresses in the PBD - for local
619c5cb6
VZ
3612 * switching
3613 */
3614 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3615 &pbd_e2->src_mac_addr_mid,
3616 &pbd_e2->src_mac_addr_lo,
3617 eth->h_source);
3618 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3619 &pbd_e2->dst_mac_addr_mid,
3620 &pbd_e2->dst_mac_addr_lo,
3621 eth->h_dest);
3622 }
96bed4b9
YM
3623
3624 SET_FLAG(pbd_e2_parsing_data,
3625 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3626 } else {
96bed4b9 3627 u16 global_data = 0;
6383c0b3 3628 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3629 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3630 /* Set PBD in checksum offload case */
3631 if (xmit_type & XMIT_CSUM)
3632 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3633
96bed4b9
YM
3634 SET_FLAG(global_data,
3635 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3636 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3637 }
3638
f85582f8 3639 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3640 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3641 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 3642 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
3643 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3644 pkt_size = tx_start_bd->nbytes;
3645
51c1a580
MS
3646 DP(NETIF_MSG_TX_QUEUED,
3647 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
3648 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3649 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3650 tx_start_bd->bd_flags.as_bitfield,
3651 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3652
3653 if (xmit_type & XMIT_GSO) {
3654
3655 DP(NETIF_MSG_TX_QUEUED,
3656 "TSO packet len %d hlen %d total len %d tso size %d\n",
3657 skb->len, hlen, skb_headlen(skb),
3658 skb_shinfo(skb)->gso_size);
3659
3660 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3661
3662 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
3663 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3664 &tx_start_bd, hlen,
3665 bd_prod, ++nbd);
619c5cb6 3666 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3667 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3668 xmit_type);
f2e0899f
DK
3669 else
3670 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3671 }
2297a2da
VZ
3672
3673 /* Set the PBD's parsing_data field if not zero
3674 * (for the chips newer than 57711).
3675 */
3676 if (pbd_e2_parsing_data)
3677 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3678
9f6c9258
DK
3679 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3680
f85582f8 3681 /* Handle fragmented skb */
9f6c9258
DK
3682 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3683 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3684
9e903e08
ED
3685 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3686 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3687 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3688 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3689
51c1a580
MS
3690 DP(NETIF_MSG_TX_QUEUED,
3691 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3692
3693 /* we need to unmap all buffers already mapped
3694 * for this SKB;
3695 * first_bd->nbd needs to be properly updated
3696 * before the call to bnx2x_free_tx_pkt
3697 */
3698 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3699 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3700 TX_BD(txdata->tx_pkt_prod),
3701 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3702 return NETDEV_TX_OK;
3703 }
3704
9f6c9258 3705 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3706 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3707 if (total_pkt_bd == NULL)
6383c0b3 3708 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3709
9f6c9258
DK
3710 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3711 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3712 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3713 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3714 nbd++;
9f6c9258
DK
3715
3716 DP(NETIF_MSG_TX_QUEUED,
3717 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3718 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3719 le16_to_cpu(tx_data_bd->nbytes));
3720 }
3721
3722 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3723
619c5cb6
VZ
3724 /* update with actual num BDs */
3725 first_bd->nbd = cpu_to_le16(nbd);
3726
9f6c9258
DK
3727 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3728
3729 /* now send a tx doorbell, counting the next BD
3730 * if the packet contains or ends with it
3731 */
3732 if (TX_BD_POFF(bd_prod) < nbd)
3733 nbd++;
3734
619c5cb6
VZ
3735 /* total_pkt_bytes should be set on the first data BD if
3736 * it's not an LSO packet and there is more than one
3737 * data BD. In this case pkt_size is limited by an MTU value.
3738 * However we prefer to set it for an LSO packet (while we don't
3740 * have to) in order to save some CPU cycles in a non-LSO
3741 * case, when we care much more about them.
3741 */
9f6c9258
DK
3742 if (total_pkt_bd != NULL)
3743 total_pkt_bd->total_pkt_bytes = pkt_size;
3744
523224a3 3745 if (pbd_e1x)
9f6c9258 3746 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3747 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3748 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3749 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3750 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3751 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3752 if (pbd_e2)
3753 DP(NETIF_MSG_TX_QUEUED,
3754 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3755 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3756 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3757 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3758 pbd_e2->parsing_data);
9f6c9258
DK
3759 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3760
2df1a70a
TH
3761 netdev_tx_sent_queue(txq, skb->len);
3762
8373c57d
WB
3763 skb_tx_timestamp(skb);
3764
6383c0b3 3765 txdata->tx_pkt_prod++;
9f6c9258
DK
3766 /*
3767 * Make sure that the BD data is updated before updating the producer
3768 * since FW might read the BD right after the producer is updated.
3769 * This is only applicable for weak-ordered memory model archs such
3770 * as IA-64. The following barrier is also mandatory since FW will
3771 * assume packets must have BDs.
3772 */
3773 wmb();
3774
6383c0b3 3775 txdata->tx_db.data.prod += nbd;
9f6c9258 3776 barrier();
f85582f8 3777
6383c0b3 3778 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3779
3780 mmiowb();
3781
6383c0b3 3782 txdata->tx_bd_prod += nbd;
9f6c9258 3783
7df2dc6b 3784 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3785 netif_tx_stop_queue(txq);
3786
3787 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3788 * ordering of set_bit() in netif_tx_stop_queue() and read of
3789 * fp->bd_tx_cons */
3790 smp_mb();
3791
15192a8c 3792 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3793 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3794 netif_tx_wake_queue(txq);
3795 }
6383c0b3 3796 txdata->tx_pkt++;
9f6c9258
DK
3797
3798 return NETDEV_TX_OK;
3799}
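A minimal sketch of the publish-then-doorbell ordering used at the end of bnx2x_start_xmit(); illustrative only, with a hypothetical ring structure and doorbell stub, assuming the kernel's wmb() barrier.

struct example_tx_ring {
	u16 prod;			/* producer index exposed to HW */
};

static void example_ring_doorbell(struct example_tx_ring *ring)
{
	/* stand-in for the real DOORBELL() MMIO write */
	(void)ring;
}

static void example_publish_tx_bds(struct example_tx_ring *ring, u16 nbd)
{
	/* BDs were already written by the caller; make those stores
	 * visible before the device can observe the new producer.
	 */
	wmb();

	ring->prod += nbd;
	example_ring_doorbell(ring);
}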
f85582f8 3800
6383c0b3
AE
3801/**
3802 * bnx2x_setup_tc - routine to configure net_device for multi tc
3803 *
3804 * @netdev: net device to configure
3805 * @tc: number of traffic classes to enable
3806 *
3807 * callback connected to the ndo_setup_tc function pointer
3808 */
3809int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3810{
3811 int cos, prio, count, offset;
3812 struct bnx2x *bp = netdev_priv(dev);
3813
3814 /* setup tc must be called under rtnl lock */
3815 ASSERT_RTNL();
3816
3817 /* no traffic classes requested; abort */
3818 if (!num_tc) {
3819 netdev_reset_tc(dev);
3820 return 0;
3821 }
3822
3823 /* requested to support too many traffic classes */
3824 if (num_tc > bp->max_cos) {
51c1a580
MS
3825 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3826 num_tc, bp->max_cos);
6383c0b3
AE
3827 return -EINVAL;
3828 }
3829
3830 /* declare amount of supported traffic classes */
3831 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3832 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3833 return -EINVAL;
3834 }
3835
3836 /* configure priority to traffic class mapping */
3837 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3838 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3839 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3840 "mapping priority %d to tc %d\n",
6383c0b3
AE
3841 prio, bp->prio_to_cos[prio]);
3842 }
3843
3844
3845 /* Use this configuration to differentiate tc0 from other COSes.
3846 This can be used for ETS or PFC, and saves the effort of setting
3847 up a multi-class queueing discipline or negotiating DCBX with a switch
3848 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3849 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3850 for (prio = 1; prio < 16; prio++) {
3851 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3852 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3853 } */
3854
3855 /* configure traffic class to transmission queue mapping */
3856 for (cos = 0; cos < bp->max_cos; cos++) {
3857 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3858 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3859 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3860 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3861 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3862 cos, offset, count);
3863 }
3864
3865 return 0;
3866}
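A minimal sketch of how the traffic-class to queue mapping above lays out in practice, assuming a hypothetical configuration of 4 ETH queues and two traffic classes; netdev_set_tc_queue() is the standard netdev helper used above, everything else here is illustrative.

static void example_tc_layout(struct net_device *dev)
{
	int cos, count = 4;		/* stands in for BNX2X_NUM_ETH_QUEUES(bp) */

	for (cos = 0; cos < 2; cos++)	/* num_tc == 2 */
		netdev_set_tc_queue(dev, cos, count, cos * count);

	/* result: tc0 -> txq 0..3, tc1 -> txq 4..7 */
}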
3867
9f6c9258
DK
3868/* called with rtnl_lock */
3869int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3870{
3871 struct sockaddr *addr = p;
3872 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3873 int rc = 0;
9f6c9258 3874
51c1a580
MS
3875 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3876 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3877 return -EINVAL;
51c1a580 3878 }
614c76df 3879
a3348722
BW
3880 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3881 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3882 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3883 return -EINVAL;
51c1a580 3884 }
9f6c9258 3885
619c5cb6
VZ
3886 if (netif_running(dev)) {
3887 rc = bnx2x_set_eth_mac(bp, false);
3888 if (rc)
3889 return rc;
3890 }
3891
9f6c9258 3892 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3893
523224a3 3894 if (netif_running(dev))
619c5cb6 3895 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3896
619c5cb6 3897 return rc;
9f6c9258
DK
3898}
3899
b3b83c3f
DK
3900static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3901{
3902 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3903 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3904 u8 cos;
b3b83c3f
DK
3905
3906 /* Common */
55c11941 3907
b3b83c3f
DK
3908 if (IS_FCOE_IDX(fp_index)) {
3909 memset(sb, 0, sizeof(union host_hc_status_block));
3910 fp->status_blk_mapping = 0;
b3b83c3f 3911 } else {
b3b83c3f 3912 /* status blocks */
619c5cb6 3913 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3914 BNX2X_PCI_FREE(sb->e2_sb,
3915 bnx2x_fp(bp, fp_index,
3916 status_blk_mapping),
3917 sizeof(struct host_hc_status_block_e2));
3918 else
3919 BNX2X_PCI_FREE(sb->e1x_sb,
3920 bnx2x_fp(bp, fp_index,
3921 status_blk_mapping),
3922 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3923 }
55c11941 3924
b3b83c3f
DK
3925 /* Rx */
3926 if (!skip_rx_queue(bp, fp_index)) {
3927 bnx2x_free_rx_bds(fp);
3928
3929 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3930 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3931 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3932 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3933 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3934
3935 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3936 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3937 sizeof(struct eth_fast_path_rx_cqe) *
3938 NUM_RCQ_BD);
3939
3940 /* SGE ring */
3941 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3942 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3943 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3944 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3945 }
3946
3947 /* Tx */
3948 if (!skip_tx_queue(bp, fp_index)) {
3949 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3950 for_each_cos_in_tx_queue(fp, cos) {
65565884 3951 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3952
51c1a580 3953 DP(NETIF_MSG_IFDOWN,
94f05b0f 3954 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3955 fp_index, cos, txdata->cid);
3956
3957 BNX2X_FREE(txdata->tx_buf_ring);
3958 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3959 txdata->tx_desc_mapping,
3960 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3961 }
b3b83c3f
DK
3962 }
3963 /* end of fastpath */
3964}
3965
55c11941
MS
3966void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3967{
3968 int i;
3969 for_each_cnic_queue(bp, i)
3970 bnx2x_free_fp_mem_at(bp, i);
3971}
3972
b3b83c3f
DK
3973void bnx2x_free_fp_mem(struct bnx2x *bp)
3974{
3975 int i;
55c11941 3976 for_each_eth_queue(bp, i)
b3b83c3f
DK
3977 bnx2x_free_fp_mem_at(bp, i);
3978}
3979
1191cb83 3980static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3981{
3982 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3983 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3984 bnx2x_fp(bp, index, sb_index_values) =
3985 (__le16 *)status_blk.e2_sb->sb.index_values;
3986 bnx2x_fp(bp, index, sb_running_index) =
3987 (__le16 *)status_blk.e2_sb->sb.running_index;
3988 } else {
3989 bnx2x_fp(bp, index, sb_index_values) =
3990 (__le16 *)status_blk.e1x_sb->sb.index_values;
3991 bnx2x_fp(bp, index, sb_running_index) =
3992 (__le16 *)status_blk.e1x_sb->sb.running_index;
3993 }
3994}
3995
1191cb83
ED
3996/* Returns the number of actually allocated BDs */
3997static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3998 int rx_ring_size)
3999{
4000 struct bnx2x *bp = fp->bp;
4001 u16 ring_prod, cqe_ring_prod;
4002 int i, failure_cnt = 0;
4003
4004 fp->rx_comp_cons = 0;
4005 cqe_ring_prod = ring_prod = 0;
4006
4007 /* This routine is called only during fp init so
4008 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4009 */
4010 for (i = 0; i < rx_ring_size; i++) {
4011 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4012 failure_cnt++;
4013 continue;
4014 }
4015 ring_prod = NEXT_RX_IDX(ring_prod);
4016 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4017 WARN_ON(ring_prod <= (i - failure_cnt));
4018 }
4019
4020 if (failure_cnt)
4021 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4022 i - failure_cnt, fp->index);
4023
4024 fp->rx_bd_prod = ring_prod;
4025 /* Limit the CQE producer by the CQE ring size */
4026 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4027 cqe_ring_prod);
4028 fp->rx_pkt = fp->rx_calls = 0;
4029
15192a8c 4030 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4031
4032 return i - failure_cnt;
4033}
4034
4035static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4036{
4037 int i;
4038
4039 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4040 struct eth_rx_cqe_next_page *nextpg;
4041
4042 nextpg = (struct eth_rx_cqe_next_page *)
4043 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4044 nextpg->addr_hi =
4045 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4047 nextpg->addr_lo =
4048 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4050 }
4051}
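A minimal sketch of the page-chaining pattern used by bnx2x_set_next_page_rx_cq() above: the last slot of each ring page points to the next page, and the final page wraps back to the first. The names below are hypothetical, not driver symbols.

static void example_chain_ring_pages(dma_addr_t base, u32 page_size,
				     int nr_pages, u64 *next_page_slot)
{
	int i;

	/* page i's trailing slot holds the DMA address of page
	 * (i % nr_pages); the modulo makes the last page point
	 * back at page 0.
	 */
	for (i = 1; i <= nr_pages; i++)
		next_page_slot[i - 1] = (u64)base +
					page_size * (i % nr_pages);
}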
4052
b3b83c3f
DK
4053static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4054{
4055 union host_hc_status_block *sb;
4056 struct bnx2x_fastpath *fp = &bp->fp[index];
4057 int ring_size = 0;
6383c0b3 4058 u8 cos;
c2188952 4059 int rx_ring_size = 0;
b3b83c3f 4060
a3348722
BW
4061 if (!bp->rx_ring_size &&
4062 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4063 rx_ring_size = MIN_RX_SIZE_NONTPA;
4064 bp->rx_ring_size = rx_ring_size;
55c11941 4065 } else if (!bp->rx_ring_size) {
c2188952
VZ
4066 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4067
065f8b92
YM
4068 if (CHIP_IS_E3(bp)) {
4069 u32 cfg = SHMEM_RD(bp,
4070 dev_info.port_hw_config[BP_PORT(bp)].
4071 default_cfg);
4072
4073 /* Decrease ring size for 1G functions */
4074 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4075 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4076 rx_ring_size /= 10;
4077 }
d760fc37 4078
c2188952
VZ
4079 /* allocate at least number of buffers required by FW */
4080 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4081 MIN_RX_SIZE_TPA, rx_ring_size);
4082
4083 bp->rx_ring_size = rx_ring_size;
614c76df 4084 } else /* if rx_ring_size specified - use it */
c2188952 4085 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4086
b3b83c3f
DK
4087 /* Common */
4088 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4089
b3b83c3f 4090 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4091 /* status blocks */
619c5cb6 4092 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4093 BNX2X_PCI_ALLOC(sb->e2_sb,
4094 &bnx2x_fp(bp, index, status_blk_mapping),
4095 sizeof(struct host_hc_status_block_e2));
4096 else
4097 BNX2X_PCI_ALLOC(sb->e1x_sb,
4098 &bnx2x_fp(bp, index, status_blk_mapping),
4099 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4100 }
8eef2af1
DK
4101
4102 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4103 * set shortcuts for it.
4104 */
4105 if (!IS_FCOE_IDX(index))
4106 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4107
4108 /* Tx */
4109 if (!skip_tx_queue(bp, index)) {
4110 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4111 for_each_cos_in_tx_queue(fp, cos) {
65565884 4112 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4113
51c1a580
MS
4114 DP(NETIF_MSG_IFUP,
4115 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4116 index, cos);
4117
4118 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4119 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4120 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4121 &txdata->tx_desc_mapping,
b3b83c3f 4122 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4123 }
b3b83c3f
DK
4124 }
4125
4126 /* Rx */
4127 if (!skip_rx_queue(bp, index)) {
4128 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4129 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4130 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4131 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4132 &bnx2x_fp(bp, index, rx_desc_mapping),
4133 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4134
4135 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4136 &bnx2x_fp(bp, index, rx_comp_mapping),
4137 sizeof(struct eth_fast_path_rx_cqe) *
4138 NUM_RCQ_BD);
4139
4140 /* SGE ring */
4141 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4142 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4143 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4144 &bnx2x_fp(bp, index, rx_sge_mapping),
4145 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4146 /* RX BD ring */
4147 bnx2x_set_next_page_rx_bd(fp);
4148
4149 /* CQ ring */
4150 bnx2x_set_next_page_rx_cq(fp);
4151
4152 /* BDs */
4153 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4154 if (ring_size < rx_ring_size)
4155 goto alloc_mem_err;
4156 }
4157
4158 return 0;
4159
4160/* handles low memory cases */
4161alloc_mem_err:
4162 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4163 index, ring_size);
4164 /* FW will drop all packets if the queue is not big enough;
4165 * in these cases we disable the queue.
6383c0b3 4166 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4167 */
4168 if (ring_size < (fp->disable_tpa ?
eb722d7a 4169 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4170 /* release memory allocated for this queue */
4171 bnx2x_free_fp_mem_at(bp, index);
4172 return -ENOMEM;
4173 }
4174 return 0;
4175}
4176
55c11941
MS
4177int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4178{
4179 if (!NO_FCOE(bp))
4180 /* FCoE */
4181 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4182 /* we will fail load process instead of mark
4183 * NO_FCOE_FLAG
4184 */
4185 return -ENOMEM;
4186
4187 return 0;
4188}
4189
b3b83c3f
DK
4190int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4191{
4192 int i;
4193
55c11941
MS
4194 /* 1. Allocate FP for leading - fatal if error
4195 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4196 */
4197
4198 /* leading */
4199 if (bnx2x_alloc_fp_mem_at(bp, 0))
4200 return -ENOMEM;
6383c0b3 4201
b3b83c3f
DK
4202 /* RSS */
4203 for_each_nondefault_eth_queue(bp, i)
4204 if (bnx2x_alloc_fp_mem_at(bp, i))
4205 break;
4206
4207 /* handle memory failures */
4208 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4209 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4210
4211 WARN_ON(delta < 0);
4864a16a 4212 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4213 if (CNIC_SUPPORT(bp))
4214 /* move non eth FPs next to last eth FP
4215 * must be done in that order
4216 * FCOE_IDX < FWD_IDX < OOO_IDX
4217 */
b3b83c3f 4218
55c11941
MS
4219 /* move FCoE fp even if NO_FCOE_FLAG is on */
4220 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4221 bp->num_ethernet_queues -= delta;
4222 bp->num_queues = bp->num_ethernet_queues +
4223 bp->num_cnic_queues;
b3b83c3f
DK
4224 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4225 bp->num_queues + delta, bp->num_queues);
4226 }
4227
4228 return 0;
4229}
d6214d7a 4230
523224a3
DK
4231void bnx2x_free_mem_bp(struct bnx2x *bp)
4232{
15192a8c 4233 kfree(bp->fp->tpa_info);
523224a3 4234 kfree(bp->fp);
15192a8c
BW
4235 kfree(bp->sp_objs);
4236 kfree(bp->fp_stats);
65565884 4237 kfree(bp->bnx2x_txq);
523224a3
DK
4238 kfree(bp->msix_table);
4239 kfree(bp->ilt);
4240}
4241
0329aba1 4242int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4243{
4244 struct bnx2x_fastpath *fp;
4245 struct msix_entry *tbl;
4246 struct bnx2x_ilt *ilt;
6383c0b3 4247 int msix_table_size = 0;
55c11941 4248 int fp_array_size, txq_array_size;
15192a8c 4249 int i;
6383c0b3
AE
4250
4251 /*
4252 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4253 * path IGU SBs plus default SB (for PF only).
6383c0b3 4254 */
1ab4434c
AE
4255 msix_table_size = bp->igu_sb_cnt;
4256 if (IS_PF(bp))
4257 msix_table_size++;
4258 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4259
6383c0b3 4260 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4261 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
15192a8c
BW
4262 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
4263
4264 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4265 if (!fp)
4266 goto alloc_err;
15192a8c
BW
4267 for (i = 0; i < fp_array_size; i++) {
4268 fp[i].tpa_info =
4269 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4270 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4271 if (!(fp[i].tpa_info))
4272 goto alloc_err;
4273 }
4274
523224a3
DK
4275 bp->fp = fp;
4276
15192a8c
BW
4277 /* allocate sp objs */
4278 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4279 GFP_KERNEL);
4280 if (!bp->sp_objs)
4281 goto alloc_err;
4282
4283 /* allocate fp_stats */
4284 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4285 GFP_KERNEL);
4286 if (!bp->fp_stats)
4287 goto alloc_err;
4288
65565884 4289 /* Allocate memory for the transmission queues array */
55c11941
MS
4290 txq_array_size =
4291 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4292 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4293
4294 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4295 GFP_KERNEL);
65565884
MS
4296 if (!bp->bnx2x_txq)
4297 goto alloc_err;
4298
523224a3 4299 /* msix table */
01e23742 4300 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4301 if (!tbl)
4302 goto alloc_err;
4303 bp->msix_table = tbl;
4304
4305 /* ilt */
4306 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4307 if (!ilt)
4308 goto alloc_err;
4309 bp->ilt = ilt;
4310
4311 return 0;
4312alloc_err:
4313 bnx2x_free_mem_bp(bp);
4314 return -ENOMEM;
4315
4316}
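A minimal sketch of the allocate-or-unwind pattern used by bnx2x_alloc_mem_bp() above, reduced to a hypothetical two-buffer case; it relies only on kcalloc()/kfree() and the fact that kfree(NULL) is a no-op, so a single error label can release whatever was allocated so far.

static int example_alloc_pair(void **a, void **b, size_t n)
{
	*a = *b = NULL;

	*a = kcalloc(n, sizeof(u32), GFP_KERNEL);
	if (!*a)
		goto alloc_err;

	*b = kcalloc(n, sizeof(u32), GFP_KERNEL);
	if (!*b)
		goto alloc_err;

	return 0;

alloc_err:
	/* frees both pointers; the one that failed is NULL */
	kfree(*a);
	kfree(*b);
	*a = *b = NULL;
	return -ENOMEM;
}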
4317
a9fccec7 4318int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4319{
4320 struct bnx2x *bp = netdev_priv(dev);
4321
4322 if (unlikely(!netif_running(dev)))
4323 return 0;
4324
5d07d868 4325 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4326 return bnx2x_nic_load(bp, LOAD_NORMAL);
4327}
4328
1ac9e428
YR
4329int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4330{
4331 u32 sel_phy_idx = 0;
4332 if (bp->link_params.num_phys <= 1)
4333 return INT_PHY;
4334
4335 if (bp->link_vars.link_up) {
4336 sel_phy_idx = EXT_PHY1;
4337 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4338 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4339 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4340 sel_phy_idx = EXT_PHY2;
4341 } else {
4342
4343 switch (bnx2x_phy_selection(&bp->link_params)) {
4344 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4345 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4346 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4347 sel_phy_idx = EXT_PHY1;
4348 break;
4349 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4350 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4351 sel_phy_idx = EXT_PHY2;
4352 break;
4353 }
4354 }
4355
4356 return sel_phy_idx;
4357
4358}
4359int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4360{
4361 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4362 /*
2de67439 4363 * The selected activated PHY is always the one after swapping (in case PHY
1ac9e428
YR
4364 * swapping is enabled). So when swapping is enabled, we need to reverse
4365 * the configuration
4366 */
4367
4368 if (bp->link_params.multi_phy_config &
4369 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4370 if (sel_phy_idx == EXT_PHY1)
4371 sel_phy_idx = EXT_PHY2;
4372 else if (sel_phy_idx == EXT_PHY2)
4373 sel_phy_idx = EXT_PHY1;
4374 }
4375 return LINK_CONFIG_IDX(sel_phy_idx);
4376}
4377
55c11941 4378#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4379int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4380{
4381 struct bnx2x *bp = netdev_priv(dev);
4382 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4383
4384 switch (type) {
4385 case NETDEV_FCOE_WWNN:
4386 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4387 cp->fcoe_wwn_node_name_lo);
4388 break;
4389 case NETDEV_FCOE_WWPN:
4390 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4391 cp->fcoe_wwn_port_name_lo);
4392 break;
4393 default:
51c1a580 4394 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4395 return -EINVAL;
4396 }
4397
4398 return 0;
4399}
4400#endif
4401
9f6c9258
DK
4402/* called with rtnl_lock */
4403int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4404{
4405 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4406
4407 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4408 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4409 return -EAGAIN;
4410 }
4411
4412 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4413 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4414 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4415 return -EINVAL;
51c1a580 4416 }
9f6c9258
DK
4417
4418 /* This does not race with packet allocation
4419 * because the actual alloc size is
4420 * only updated as part of load
4421 */
4422 dev->mtu = new_mtu;
4423
66371c44
MM
4424 return bnx2x_reload_if_running(dev);
4425}
4426
c8f44aff 4427netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4428 netdev_features_t features)
66371c44
MM
4429{
4430 struct bnx2x *bp = netdev_priv(dev);
4431
4432 /* TPA requires Rx CSUM offloading */
621b4d66 4433 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4434 features &= ~NETIF_F_LRO;
621b4d66
DK
4435 features &= ~NETIF_F_GRO;
4436 }
66371c44
MM
4437
4438 return features;
4439}
4440
c8f44aff 4441int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4442{
4443 struct bnx2x *bp = netdev_priv(dev);
4444 u32 flags = bp->flags;
538dd2e3 4445 bool bnx2x_reload = false;
66371c44
MM
4446
4447 if (features & NETIF_F_LRO)
4448 flags |= TPA_ENABLE_FLAG;
4449 else
4450 flags &= ~TPA_ENABLE_FLAG;
4451
621b4d66
DK
4452 if (features & NETIF_F_GRO)
4453 flags |= GRO_ENABLE_FLAG;
4454 else
4455 flags &= ~GRO_ENABLE_FLAG;
4456
538dd2e3
MB
4457 if (features & NETIF_F_LOOPBACK) {
4458 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4459 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4460 bnx2x_reload = true;
4461 }
4462 } else {
4463 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4464 bp->link_params.loopback_mode = LOOPBACK_NONE;
4465 bnx2x_reload = true;
4466 }
4467 }
4468
66371c44
MM
4469 if (flags ^ bp->flags) {
4470 bp->flags = flags;
538dd2e3
MB
4471 bnx2x_reload = true;
4472 }
66371c44 4473
538dd2e3 4474 if (bnx2x_reload) {
66371c44
MM
4475 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4476 return bnx2x_reload_if_running(dev);
4477 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4478 }
4479
66371c44 4480 return 0;
9f6c9258
DK
4481}
4482
4483void bnx2x_tx_timeout(struct net_device *dev)
4484{
4485 struct bnx2x *bp = netdev_priv(dev);
4486
4487#ifdef BNX2X_STOP_ON_ERROR
4488 if (!bp->panic)
4489 bnx2x_panic();
4490#endif
7be08a72
AE
4491
4492 smp_mb__before_clear_bit();
4493 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4494 smp_mb__after_clear_bit();
4495
9f6c9258 4496 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4497 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4498}
4499
9f6c9258
DK
4500int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4501{
4502 struct net_device *dev = pci_get_drvdata(pdev);
4503 struct bnx2x *bp;
4504
4505 if (!dev) {
4506 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4507 return -ENODEV;
4508 }
4509 bp = netdev_priv(dev);
4510
4511 rtnl_lock();
4512
4513 pci_save_state(pdev);
4514
4515 if (!netif_running(dev)) {
4516 rtnl_unlock();
4517 return 0;
4518 }
4519
4520 netif_device_detach(dev);
4521
5d07d868 4522 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4523
4524 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4525
4526 rtnl_unlock();
4527
4528 return 0;
4529}
4530
4531int bnx2x_resume(struct pci_dev *pdev)
4532{
4533 struct net_device *dev = pci_get_drvdata(pdev);
4534 struct bnx2x *bp;
4535 int rc;
4536
4537 if (!dev) {
4538 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4539 return -ENODEV;
4540 }
4541 bp = netdev_priv(dev);
4542
4543 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4544 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4545 return -EAGAIN;
4546 }
4547
4548 rtnl_lock();
4549
4550 pci_restore_state(pdev);
4551
4552 if (!netif_running(dev)) {
4553 rtnl_unlock();
4554 return 0;
4555 }
4556
4557 bnx2x_set_power_state(bp, PCI_D0);
4558 netif_device_attach(dev);
4559
4560 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4561
4562 rtnl_unlock();
4563
4564 return rc;
4565}
619c5cb6
VZ
4566
4567
4568void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4569 u32 cid)
4570{
4571 /* ustorm cxt validation */
4572 cxt->ustorm_ag_context.cdu_usage =
4573 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4574 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4575 /* xcontext validation */
4576 cxt->xstorm_ag_context.cdu_reserved =
4577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4578 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4579}
4580
1191cb83
ED
4581static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4582 u8 fw_sb_id, u8 sb_index,
4583 u8 ticks)
619c5cb6
VZ
4584{
4585
4586 u32 addr = BAR_CSTRORM_INTMEM +
4587 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4588 REG_WR8(bp, addr, ticks);
51c1a580
MS
4589 DP(NETIF_MSG_IFUP,
4590 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4591 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4592}
4593
1191cb83
ED
4594static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4595 u16 fw_sb_id, u8 sb_index,
4596 u8 disable)
619c5cb6
VZ
4597{
4598 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4599 u32 addr = BAR_CSTRORM_INTMEM +
4600 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4601 u16 flags = REG_RD16(bp, addr);
4602 /* clear and set */
4603 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4604 flags |= enable_flag;
4605 REG_WR16(bp, addr, flags);
51c1a580
MS
4606 DP(NETIF_MSG_IFUP,
4607 "port %x fw_sb_id %d sb_index %d disable %d\n",
4608 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4609}
4610
4611void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4612 u8 sb_index, u8 disable, u16 usec)
4613{
4614 int port = BP_PORT(bp);
4615 u8 ticks = usec / BNX2X_BTR;
4616
4617 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4618
4619 disable = disable ? 1 : (usec ? 0 : 1);
4620 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4621}
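The 'disable' expression above, written out long-hand as an illustrative helper (not driver code) to make the intent explicit: coalescing is turned off either on explicit request or when a zero timeout is passed.

static u8 example_hc_disable(u8 disable_requested, u16 usec)
{
	if (disable_requested)
		return 1;	/* caller explicitly disabled coalescing */
	if (usec == 0)
		return 1;	/* a zero timeout also means "disabled" */
	return 0;		/* otherwise leave coalescing enabled */
}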