1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
 3 * Copyright (c) 2007-2012 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
 20#include <linux/etherdevice.h>
 21#include <linux/if_vlan.h>
 22#include <linux/interrupt.h>
 23#include <linux/ip.h>
 24#include <net/ipv6.h>
 25#include <net/ip6_checksum.h>
 26#include <linux/prefetch.h>
 27#include "bnx2x_cmn.h"
 28#include "bnx2x_init.h"
 29#include "bnx2x_sp.h"
 30#include "bnx2x_sriov.h"
 31
 32/**
 33 * bnx2x_move_fp - move content of the fastpath structure.
 34 *
 35 * @bp: driver handle
 36 * @from: source FP index
 37 * @to: destination FP index
 38 *
 39 * Makes sure the contents of bp->fp[to].napi are kept
 40 * intact. This is done by first copying the napi struct from
 41 * the target to the source, and then mem-copying the entire
 42 * source onto the target. Updates txdata pointers and related
 43 * content.
 44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
72754080
AE
55
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
58
b3b83c3f
DK
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
65565884 62
15192a8c
BW
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
65565884
MS
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
 82 memcpy(&bp->bnx2x_txq[new_txdata_index],
 83 &bp->bnx2x_txq[old_txdata_index],
 84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
86}
87
8ca5e17e
AE
88/**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97{
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
112 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
113 }
114}
115
619c5cb6
VZ
116int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
117
9f6c9258
DK
118/* free skb in the packet ring at pos idx
119 * return idx of last bd freed
120 */
6383c0b3 121static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
122 u16 idx, unsigned int *pkts_compl,
123 unsigned int *bytes_compl)
9f6c9258 124{
6383c0b3 125 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
126 struct eth_tx_start_bd *tx_start_bd;
127 struct eth_tx_bd *tx_data_bd;
128 struct sk_buff *skb = tx_buf->skb;
129 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
130 int nbd;
131
132 /* prefetch skb end pointer to speedup dev_kfree_skb() */
133 prefetch(&skb->end);
134
51c1a580 135 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 136 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
137
138 /* unmap first bd */
6383c0b3 139 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 140 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 141 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 142
619c5cb6 143
9f6c9258
DK
144 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
145#ifdef BNX2X_STOP_ON_ERROR
146 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
147 BNX2X_ERR("BAD nbd!\n");
148 bnx2x_panic();
149 }
150#endif
151 new_cons = nbd + tx_buf->first_bd;
152
153 /* Get the next bd */
154 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
155
156 /* Skip a parse bd... */
157 --nbd;
158 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
159
160 /* ...and the TSO split header bd since they have no mapping */
161 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
162 --nbd;
163 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
164 }
165
166 /* now free frags */
167 while (nbd > 0) {
168
6383c0b3 169 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
170 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
171 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
172 if (--nbd)
173 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
174 }
175
176 /* release skb */
177 WARN_ON(!skb);
d8290ae5 178 if (likely(skb)) {
2df1a70a
TH
179 (*pkts_compl)++;
180 (*bytes_compl) += skb->len;
181 }
d8290ae5 182
40955532 183 dev_kfree_skb_any(skb);
9f6c9258
DK
184 tx_buf->first_bd = 0;
185 tx_buf->skb = NULL;
186
187 return new_cons;
188}
189
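/* Tx completion handler for one txdata ring: free every packet the
 * chip has completed, report the freed packets/bytes to BQL and
 * re-wake the netdev queue if it was stopped and there is room again.
 */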
6383c0b3 190int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 191{
9f6c9258 192 struct netdev_queue *txq;
6383c0b3 193 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 194 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
195
196#ifdef BNX2X_STOP_ON_ERROR
197 if (unlikely(bp->panic))
198 return -1;
199#endif
200
6383c0b3
AE
201 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
202 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
203 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
204
205 while (sw_cons != hw_cons) {
206 u16 pkt_cons;
207
208 pkt_cons = TX_BD(sw_cons);
209
51c1a580
MS
210 DP(NETIF_MSG_TX_DONE,
211 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 212 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 213
2df1a70a
TH
214 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
215 &pkts_compl, &bytes_compl);
216
9f6c9258
DK
217 sw_cons++;
218 }
219
2df1a70a
TH
220 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
221
6383c0b3
AE
222 txdata->tx_pkt_cons = sw_cons;
223 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
224
225 /* Need to make the tx_bd_cons update visible to start_xmit()
226 * before checking for netif_tx_queue_stopped(). Without the
227 * memory barrier, there is a small possibility that
228 * start_xmit() will miss it and cause the queue to be stopped
229 * forever.
619c5cb6
VZ
230 * On the other hand we need an rmb() here to ensure the proper
231 * ordering of bit testing in the following
232 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
233 */
234 smp_mb();
235
9f6c9258
DK
236 if (unlikely(netif_tx_queue_stopped(txq))) {
237 /* Taking tx_lock() is needed to prevent reenabling the queue
238 * while it's empty. This could have happen if rx_action() gets
239 * suspended in bnx2x_tx_int() after the condition before
240 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
241 *
242 * stops the queue->sees fresh tx_bd_cons->releases the queue->
243 * sends some packets consuming the whole queue again->
244 * stops the queue
245 */
246
247 __netif_tx_lock(txq, smp_processor_id());
248
249 if ((netif_tx_queue_stopped(txq)) &&
250 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 251 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
252 netif_tx_wake_queue(txq);
253
254 __netif_tx_unlock(txq);
255 }
256 return 0;
257}
258
259static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
260 u16 idx)
261{
262 u16 last_max = fp->last_max_sge;
263
264 if (SUB_S16(idx, last_max) > 0)
265 fp->last_max_sge = idx;
266}
267
621b4d66
DK
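/* Account for the SGE pages consumed by a completed aggregation:
 * clear their bits in the SGE mask, then advance the SGE producer
 * past any fully-consumed mask words and re-arm them for reuse.
 */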
268static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
269 u16 sge_len,
270 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
271{
272 struct bnx2x *bp = fp->bp;
9f6c9258
DK
273 u16 last_max, last_elem, first_elem;
274 u16 delta = 0;
275 u16 i;
276
277 if (!sge_len)
278 return;
279
280 /* First mark all used pages */
281 for (i = 0; i < sge_len; i++)
619c5cb6 282 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 283 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
284
285 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 286 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
287
288 /* Here we assume that the last SGE index is the biggest */
289 prefetch((void *)(fp->sge_mask));
523224a3 290 bnx2x_update_last_max_sge(fp,
621b4d66 291 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
292
293 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
294 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
295 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
296
297 /* If ring is not full */
298 if (last_elem + 1 != first_elem)
299 last_elem++;
300
301 /* Now update the prod */
302 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
303 if (likely(fp->sge_mask[i]))
304 break;
305
619c5cb6
VZ
306 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
307 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
308 }
309
310 if (delta > 0) {
311 fp->rx_sge_prod += delta;
312 /* clear page-end entries */
313 bnx2x_clear_sge_mask_next_elems(fp);
314 }
315
316 DP(NETIF_MSG_RX_STATUS,
317 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
318 fp->last_max_sge, fp->rx_sge_prod);
319}
320
e52fcb24
ED
321/* Set Toeplitz hash value in the skb using the value from the
322 * CQE (calculated by HW).
323 */
324static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
325 const struct eth_fast_path_rx_cqe *cqe,
326 bool *l4_rxhash)
e52fcb24
ED
327{
328 /* Set Toeplitz hash from CQE */
329 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
330 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
331 enum eth_rss_hash_type htype;
332
333 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
334 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
335 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 336 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
337 }
338 *l4_rxhash = false;
e52fcb24
ED
339 return 0;
340}
341
9f6c9258 342static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 343 u16 cons, u16 prod,
619c5cb6 344 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
345{
346 struct bnx2x *bp = fp->bp;
347 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
348 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
349 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
350 dma_addr_t mapping;
619c5cb6
VZ
351 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
352 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 353
619c5cb6
VZ
354 /* print error if current state != stop */
355 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
356 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
357
e52fcb24 358 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 359 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 360 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
361 fp->rx_buf_size, DMA_FROM_DEVICE);
362 /*
363 * ...if it fails - move the skb from the consumer to the producer
364 * and set the current aggregation state as ERROR to drop it
365 * when TPA_STOP arrives.
366 */
367
368 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
369 /* Move the BD from the consumer to the producer */
e52fcb24 370 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
371 tpa_info->tpa_state = BNX2X_TPA_ERROR;
372 return;
373 }
9f6c9258 374
e52fcb24
ED
375 /* move empty data from pool to prod */
376 prod_rx_buf->data = first_buf->data;
619c5cb6 377 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 378 /* point prod_bd to new data */
9f6c9258
DK
379 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
380 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
381
619c5cb6
VZ
382 /* move partial skb from cons to pool (don't unmap yet) */
383 *first_buf = *cons_rx_buf;
384
385 /* mark bin state as START */
386 tpa_info->parsing_flags =
387 le16_to_cpu(cqe->pars_flags.flags);
388 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
389 tpa_info->tpa_state = BNX2X_TPA_START;
390 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
391 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 392 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
393 if (fp->mode == TPA_MODE_GRO) {
394 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
395 tpa_info->full_page =
396 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
397 tpa_info->gro_size = gro_size;
398 }
619c5cb6 399
9f6c9258
DK
400#ifdef BNX2X_STOP_ON_ERROR
401 fp->tpa_queue_used |= (1 << queue);
402#ifdef _ASM_GENERIC_INT_L64_H
403 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
404#else
405 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
406#endif
407 fp->tpa_queue_used);
408#endif
409}
410
e4e3c02a
VZ
411/* Timestamp option length allowed for TPA aggregation:
412 *
413 * nop nop kind length echo val
414 */
415#define TPA_TSTAMP_OPT_LEN 12
416/**
e8920674 417 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 418 *
e8920674
DK
419 * @bp: driver handle
420 * @parsing_flags: parsing flags from the START CQE
421 * @len_on_bd: total length of the first packet for the
422 * aggregation.
423 *
424 * Approximate value of the MSS for this aggregation calculated using
425 * the first packet of it.
e4e3c02a 426 */
1191cb83
ED
427static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
428 u16 len_on_bd)
e4e3c02a 429{
619c5cb6
VZ
430 /*
431 * TPA arrgregation won't have either IP options or TCP options
432 * other than timestamp or IPv6 extension headers.
e4e3c02a 433 */
619c5cb6
VZ
434 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
435
436 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
437 PRS_FLAG_OVERETH_IPV6)
438 hdrs_len += sizeof(struct ipv6hdr);
439 else /* IPv4 */
440 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
441
442
 443 /* Check if there was a TCP timestamp; if there is one, it will
 444 * always be 12 bytes long: nop nop kind length echo val.
445 *
446 * Otherwise FW would close the aggregation.
447 */
448 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
449 hdrs_len += TPA_TSTAMP_OPT_LEN;
450
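	/* e.g. IPv4 with TCP timestamps: hdrs_len = 14 + 20 + 20 + 12 = 66,
	 * so a 1514-byte first frame yields an approximate MSS of 1448.
	 */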
451 return len_on_bd - hdrs_len;
452}
453
1191cb83
ED
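/* Allocate a block of PAGES_PER_SGE pages, DMA-map it and publish its
 * address in the SGE ring entry at @index.
 */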
454static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
455 struct bnx2x_fastpath *fp, u16 index)
456{
457 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
458 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
459 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
460 dma_addr_t mapping;
461
462 if (unlikely(page == NULL)) {
463 BNX2X_ERR("Can't alloc sge\n");
464 return -ENOMEM;
465 }
466
467 mapping = dma_map_page(&bp->pdev->dev, page, 0,
468 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
469 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
470 __free_pages(page, PAGES_PER_SGE_SHIFT);
471 BNX2X_ERR("Can't map sge\n");
472 return -ENOMEM;
473 }
474
475 sw_buf->page = page;
476 dma_unmap_addr_set(sw_buf, mapping, mapping);
477
478 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
479 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
480
481 return 0;
482}
483
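/* Attach the SGE pages of a completed aggregation to @skb as page
 * fragments, allocating a replacement page for each one consumed.
 * Also sets gso_size/gso_type so the stack can resegment the packet.
 */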
9f6c9258 484static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
485 struct bnx2x_agg_info *tpa_info,
486 u16 pages,
487 struct sk_buff *skb,
619c5cb6
VZ
488 struct eth_end_agg_rx_cqe *cqe,
489 u16 cqe_idx)
9f6c9258
DK
490{
491 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
492 u32 i, frag_len, frag_size;
493 int err, j, frag_id = 0;
619c5cb6 494 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 495 u16 full_page = 0, gro_size = 0;
9f6c9258 496
619c5cb6 497 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
498
499 if (fp->mode == TPA_MODE_GRO) {
500 gro_size = tpa_info->gro_size;
501 full_page = tpa_info->full_page;
502 }
9f6c9258
DK
503
504 /* This is needed in order to enable forwarding support */
621b4d66 505 if (frag_size) {
619c5cb6
VZ
506 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
507 tpa_info->parsing_flags, len_on_bd);
9f6c9258 508
621b4d66
DK
509 /* set for GRO */
510 if (fp->mode == TPA_MODE_GRO)
511 skb_shinfo(skb)->gso_type =
512 (GET_FLAG(tpa_info->parsing_flags,
513 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
514 PRS_FLAG_OVERETH_IPV6) ?
515 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
516 }
517
518
9f6c9258
DK
519#ifdef BNX2X_STOP_ON_ERROR
520 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
521 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
522 pages, cqe_idx);
619c5cb6 523 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
524 bnx2x_panic();
525 return -EINVAL;
526 }
527#endif
528
529 /* Run through the SGL and compose the fragmented skb */
530 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 531 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
532
533 /* FW gives the indices of the SGE as if the ring is an array
534 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
535 if (fp->mode == TPA_MODE_GRO)
536 frag_len = min_t(u32, frag_size, (u32)full_page);
537 else /* LRO */
538 frag_len = min_t(u32, frag_size,
539 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
540
9f6c9258
DK
541 rx_pg = &fp->rx_page_ring[sge_idx];
542 old_rx_pg = *rx_pg;
543
544 /* If we fail to allocate a substitute page, we simply stop
545 where we are and drop the whole packet */
546 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
547 if (unlikely(err)) {
15192a8c 548 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
549 return err;
550 }
551
 552 /* Unmap the page as we are going to pass it to the stack */
553 dma_unmap_page(&bp->pdev->dev,
554 dma_unmap_addr(&old_rx_pg, mapping),
555 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
9f6c9258 556 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
557 if (fp->mode == TPA_MODE_LRO)
558 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
559 else { /* GRO */
560 int rem;
561 int offset = 0;
562 for (rem = frag_len; rem > 0; rem -= gro_size) {
563 int len = rem > gro_size ? gro_size : rem;
564 skb_fill_page_desc(skb, frag_id++,
565 old_rx_pg.page, offset, len);
566 if (offset)
567 get_page(old_rx_pg.page);
568 offset += len;
569 }
570 }
9f6c9258
DK
571
572 skb->data_len += frag_len;
e1ac50f6 573 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
574 skb->len += frag_len;
575
576 frag_size -= frag_len;
577 }
578
579 return 0;
580}
581
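/* Rx buffers come either from the page-fragment allocator (when the
 * buffer plus padding fits in a page, i.e. fp->rx_frag_size != 0) or
 * from kmalloc(); these helpers hide that difference from the Rx path.
 */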
d46d132c
ED
582static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
583{
584 if (fp->rx_frag_size)
585 put_page(virt_to_head_page(data));
586 else
587 kfree(data);
588}
589
590static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
591{
592 if (fp->rx_frag_size)
593 return netdev_alloc_frag(fp->rx_frag_size);
594
595 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
596}
597
598
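/* Terminate a TPA aggregation: build an skb around the first buffer,
 * attach the SGE pages as fragments and hand it to GRO, or drop the
 * aggregation (keeping the buffer in the bin) if anything failed.
 */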
1191cb83
ED
599static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
600 struct bnx2x_agg_info *tpa_info,
601 u16 pages,
602 struct eth_end_agg_rx_cqe *cqe,
603 u16 cqe_idx)
9f6c9258 604{
619c5cb6 605 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 606 u8 pad = tpa_info->placement_offset;
619c5cb6 607 u16 len = tpa_info->len_on_bd;
e52fcb24 608 struct sk_buff *skb = NULL;
621b4d66 609 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
610 u8 old_tpa_state = tpa_info->tpa_state;
611
612 tpa_info->tpa_state = BNX2X_TPA_STOP;
613
 614 /* If there was an error during the handling of the TPA_START -
615 * drop this aggregation.
616 */
617 if (old_tpa_state == BNX2X_TPA_ERROR)
618 goto drop;
619
e52fcb24 620 /* Try to allocate the new data */
d46d132c 621 new_data = bnx2x_frag_alloc(fp);
9f6c9258
DK
622 /* Unmap skb in the pool anyway, as we are going to change
623 pool entry status to BNX2X_TPA_STOP even if new skb allocation
624 fails. */
625 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 626 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 627 if (likely(new_data))
d46d132c 628 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 629
e52fcb24 630 if (likely(skb)) {
9f6c9258 631#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 632 if (pad + len > fp->rx_buf_size) {
51c1a580 633 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 634 pad, len, fp->rx_buf_size);
9f6c9258
DK
635 bnx2x_panic();
636 return;
637 }
638#endif
639
e52fcb24 640 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 641 skb_put(skb, len);
e52fcb24 642 skb->rxhash = tpa_info->rxhash;
a334b5fb 643 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
644
645 skb->protocol = eth_type_trans(skb, bp->dev);
646 skb->ip_summed = CHECKSUM_UNNECESSARY;
647
621b4d66
DK
648 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
649 skb, cqe, cqe_idx)) {
619c5cb6
VZ
650 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
651 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 652 napi_gro_receive(&fp->napi, skb);
9f6c9258 653 } else {
51c1a580
MS
654 DP(NETIF_MSG_RX_STATUS,
655 "Failed to allocate new pages - dropping packet!\n");
40955532 656 dev_kfree_skb_any(skb);
9f6c9258
DK
657 }
658
659
e52fcb24
ED
660 /* put new data in bin */
661 rx_buf->data = new_data;
9f6c9258 662
619c5cb6 663 return;
9f6c9258 664 }
d46d132c 665 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
666drop:
667 /* drop the packet and keep the buffer in the bin */
668 DP(NETIF_MSG_RX_STATUS,
669 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 670 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
671}
672
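/* Allocate and DMA-map a new Rx data buffer and publish it in the Rx
 * BD ring at @index; returns -ENOMEM on allocation or mapping failure.
 */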
1191cb83
ED
673static int bnx2x_alloc_rx_data(struct bnx2x *bp,
674 struct bnx2x_fastpath *fp, u16 index)
675{
676 u8 *data;
677 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
678 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
679 dma_addr_t mapping;
680
d46d132c 681 data = bnx2x_frag_alloc(fp);
1191cb83
ED
682 if (unlikely(data == NULL))
683 return -ENOMEM;
684
685 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
686 fp->rx_buf_size,
687 DMA_FROM_DEVICE);
688 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 689 bnx2x_frag_free(fp, data);
1191cb83
ED
690 BNX2X_ERR("Can't map rx data\n");
691 return -ENOMEM;
692 }
693
694 rx_buf->data = data;
695 dma_unmap_addr_set(rx_buf, mapping, mapping);
696
697 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
698 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
699
700 return 0;
701}
702
15192a8c
BW
703static
704void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
705 struct bnx2x_fastpath *fp,
706 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 707{
e488921f
MS
708 /* Do nothing if no L4 csum validation was done.
709 * We do not check whether IP csum was validated. For IPv4 we assume
710 * that if the card got as far as validating the L4 csum, it also
711 * validated the IP csum. IPv6 has no IP csum.
712 */
d6cb3e41 713 if (cqe->fast_path_cqe.status_flags &
e488921f 714 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
715 return;
716
e488921f 717 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
718
719 if (cqe->fast_path_cqe.type_error_flags &
720 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
721 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 722 qstats->hw_csum_err++;
d6cb3e41
ED
723 else
724 skb->ip_summed = CHECKSUM_UNNECESSARY;
725}
9f6c9258
DK
726
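/* Rx completion processing for one fastpath, called from the NAPI
 * poll loop: handles up to @budget CQEs, dispatching slowpath events,
 * TPA start/stop aggregations and regular packets, then updates the
 * Rx producers for the chip.
 */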
727int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
728{
729 struct bnx2x *bp = fp->bp;
730 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
731 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
732 int rx_pkt = 0;
733
734#ifdef BNX2X_STOP_ON_ERROR
735 if (unlikely(bp->panic))
736 return 0;
737#endif
738
739 /* CQ "next element" is of the size of the regular element,
740 that's why it's ok here */
741 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
742 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
743 hw_comp_cons++;
744
745 bd_cons = fp->rx_bd_cons;
746 bd_prod = fp->rx_bd_prod;
747 bd_prod_fw = bd_prod;
748 sw_comp_cons = fp->rx_comp_cons;
749 sw_comp_prod = fp->rx_comp_prod;
750
751 /* Memory barrier necessary as speculative reads of the rx
752 * buffer can be ahead of the index in the status block
753 */
754 rmb();
755
756 DP(NETIF_MSG_RX_STATUS,
757 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
758 fp->index, hw_comp_cons, sw_comp_cons);
759
760 while (sw_comp_cons != hw_comp_cons) {
761 struct sw_rx_bd *rx_buf = NULL;
762 struct sk_buff *skb;
763 union eth_rx_cqe *cqe;
619c5cb6 764 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 765 u8 cqe_fp_flags;
619c5cb6 766 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 767 u16 len, pad, queue;
e52fcb24 768 u8 *data;
a334b5fb 769 bool l4_rxhash;
9f6c9258 770
619c5cb6
VZ
771#ifdef BNX2X_STOP_ON_ERROR
772 if (unlikely(bp->panic))
773 return 0;
774#endif
775
9f6c9258
DK
776 comp_ring_cons = RCQ_BD(sw_comp_cons);
777 bd_prod = RX_BD(bd_prod);
778 bd_cons = RX_BD(bd_cons);
779
9f6c9258 780 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
781 cqe_fp = &cqe->fast_path_cqe;
782 cqe_fp_flags = cqe_fp->type_error_flags;
783 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 784
51c1a580
MS
785 DP(NETIF_MSG_RX_STATUS,
786 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
787 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
788 cqe_fp_flags, cqe_fp->status_flags,
789 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
790 le16_to_cpu(cqe_fp->vlan_tag),
791 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
792
793 /* is this a slowpath msg? */
619c5cb6 794 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
795 bnx2x_sp_event(fp, cqe);
796 goto next_cqe;
e52fcb24 797 }
621b4d66 798
e52fcb24
ED
799 rx_buf = &fp->rx_buf_ring[bd_cons];
800 data = rx_buf->data;
9f6c9258 801
e52fcb24 802 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
803 struct bnx2x_agg_info *tpa_info;
804 u16 frag_size, pages;
619c5cb6 805#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
806 /* sanity check */
807 if (fp->disable_tpa &&
808 (CQE_TYPE_START(cqe_fp_type) ||
809 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 810 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 811 CQE_TYPE(cqe_fp_type));
619c5cb6 812#endif
9f6c9258 813
e52fcb24
ED
814 if (CQE_TYPE_START(cqe_fp_type)) {
815 u16 queue = cqe_fp->queue_index;
816 DP(NETIF_MSG_RX_STATUS,
817 "calling tpa_start on queue %d\n",
818 queue);
9f6c9258 819
e52fcb24
ED
820 bnx2x_tpa_start(fp, queue,
821 bd_cons, bd_prod,
822 cqe_fp);
621b4d66 823
e52fcb24 824 goto next_rx;
e52fcb24 825
621b4d66
DK
826 }
827 queue = cqe->end_agg_cqe.queue_index;
828 tpa_info = &fp->tpa_info[queue];
829 DP(NETIF_MSG_RX_STATUS,
830 "calling tpa_stop on queue %d\n",
831 queue);
832
833 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
834 tpa_info->len_on_bd;
835
836 if (fp->mode == TPA_MODE_GRO)
837 pages = (frag_size + tpa_info->full_page - 1) /
838 tpa_info->full_page;
839 else
840 pages = SGE_PAGE_ALIGN(frag_size) >>
841 SGE_PAGE_SHIFT;
842
843 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
844 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 845#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
846 if (bp->panic)
847 return 0;
9f6c9258
DK
848#endif
849
621b4d66
DK
850 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
851 goto next_cqe;
e52fcb24
ED
852 }
853 /* non TPA */
621b4d66 854 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
855 pad = cqe_fp->placement_offset;
856 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 857 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
858 pad + RX_COPY_THRESH,
859 DMA_FROM_DEVICE);
860 pad += NET_SKB_PAD;
861 prefetch(data + pad); /* speedup eth_type_trans() */
862 /* is this an error packet? */
863 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 864 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
865 "ERROR flags %x rx packet %u\n",
866 cqe_fp_flags, sw_comp_cons);
15192a8c 867 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
868 goto reuse_rx;
869 }
9f6c9258 870
e52fcb24
ED
871 /* Since we don't have a jumbo ring
872 * copy small packets if mtu > 1500
873 */
874 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
875 (len <= RX_COPY_THRESH)) {
876 skb = netdev_alloc_skb_ip_align(bp->dev, len);
877 if (skb == NULL) {
51c1a580 878 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 879 "ERROR packet dropped because of alloc failure\n");
15192a8c 880 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
881 goto reuse_rx;
882 }
e52fcb24
ED
883 memcpy(skb->data, data + pad, len);
884 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
885 } else {
886 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 887 dma_unmap_single(&bp->pdev->dev,
e52fcb24 888 dma_unmap_addr(rx_buf, mapping),
a8c94b91 889 fp->rx_buf_size,
9f6c9258 890 DMA_FROM_DEVICE);
d46d132c 891 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 892 if (unlikely(!skb)) {
d46d132c 893 bnx2x_frag_free(fp, data);
15192a8c
BW
894 bnx2x_fp_qstats(bp, fp)->
895 rx_skb_alloc_failed++;
e52fcb24
ED
896 goto next_rx;
897 }
9f6c9258 898 skb_reserve(skb, pad);
9f6c9258 899 } else {
51c1a580
MS
900 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
901 "ERROR packet dropped because of alloc failure\n");
15192a8c 902 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 903reuse_rx:
e52fcb24 904 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
905 goto next_rx;
906 }
036d2df9 907 }
9f6c9258 908
036d2df9
DK
909 skb_put(skb, len);
910 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 911
 912 /* Set Toeplitz hash for a non-LRO skb */
a334b5fb
ED
913 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
914 skb->l4_rxhash = l4_rxhash;
9f6c9258 915
036d2df9 916 skb_checksum_none_assert(skb);
f85582f8 917
d6cb3e41 918 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
919 bnx2x_csum_validate(skb, cqe, fp,
920 bnx2x_fp_qstats(bp, fp));
9f6c9258 921
f233cafe 922 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 923
619c5cb6
VZ
924 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
925 PARSING_FLAGS_VLAN)
9bcc0893 926 __vlan_hwaccel_put_tag(skb,
619c5cb6 927 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 928 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
929
930
931next_rx:
e52fcb24 932 rx_buf->data = NULL;
9f6c9258
DK
933
934 bd_cons = NEXT_RX_IDX(bd_cons);
935 bd_prod = NEXT_RX_IDX(bd_prod);
936 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
937 rx_pkt++;
938next_cqe:
939 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
940 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
941
942 if (rx_pkt == budget)
943 break;
944 } /* while */
945
946 fp->rx_bd_cons = bd_cons;
947 fp->rx_bd_prod = bd_prod_fw;
948 fp->rx_comp_cons = sw_comp_cons;
949 fp->rx_comp_prod = sw_comp_prod;
950
951 /* Update producers */
952 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
953 fp->rx_sge_prod);
954
955 fp->rx_pkt += rx_pkt;
956 fp->rx_calls++;
957
958 return rx_pkt;
959}
960
961static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
962{
963 struct bnx2x_fastpath *fp = fp_cookie;
964 struct bnx2x *bp = fp->bp;
6383c0b3 965 u8 cos;
9f6c9258 966
51c1a580
MS
967 DP(NETIF_MSG_INTR,
968 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
969 fp->index, fp->fw_sb_id, fp->igu_sb_id);
970 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
971
972#ifdef BNX2X_STOP_ON_ERROR
973 if (unlikely(bp->panic))
974 return IRQ_HANDLED;
975#endif
976
977 /* Handle Rx and Tx according to MSI-X vector */
978 prefetch(fp->rx_cons_sb);
6383c0b3
AE
979
980 for_each_cos_in_tx_queue(fp, cos)
65565884 981 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 982
523224a3 983 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
984 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
985
986 return IRQ_HANDLED;
987}
988
9f6c9258
DK
989/* HW Lock for shared dual port PHYs */
990void bnx2x_acquire_phy_lock(struct bnx2x *bp)
991{
992 mutex_lock(&bp->port.phy_mutex);
993
8203c4b6 994 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
995}
996
997void bnx2x_release_phy_lock(struct bnx2x *bp)
998{
8203c4b6 999 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1000
1001 mutex_unlock(&bp->port.phy_mutex);
1002}
1003
0793f83f
DK
1004/* calculates MF speed according to current linespeed and MF configuration */
1005u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1006{
1007 u16 line_speed = bp->link_vars.line_speed;
1008 if (IS_MF(bp)) {
faa6fcbb
DK
1009 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1010 bp->mf_config[BP_VN(bp)]);
1011
1012 /* Calculate the current MAX line speed limit for the MF
1013 * devices
0793f83f 1014 */
faa6fcbb
DK
1015 if (IS_MF_SI(bp))
1016 line_speed = (line_speed * maxCfg) / 100;
1017 else { /* SD mode */
0793f83f
DK
1018 u16 vn_max_rate = maxCfg * 100;
1019
1020 if (vn_max_rate < line_speed)
1021 line_speed = vn_max_rate;
faa6fcbb 1022 }
0793f83f
DK
1023 }
1024
1025 return line_speed;
1026}
1027
2ae17f66
VZ
1028/**
1029 * bnx2x_fill_report_data - fill link report data to report
1030 *
1031 * @bp: driver handle
1032 * @data: link state to update
1033 *
 1034 * It uses non-atomic bit operations because it is called under the mutex.
1035 */
1191cb83
ED
1036static void bnx2x_fill_report_data(struct bnx2x *bp,
1037 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1038{
1039 u16 line_speed = bnx2x_get_mf_speed(bp);
1040
1041 memset(data, 0, sizeof(*data));
1042
 1043 /* Fill the report data: effective line speed */
1044 data->line_speed = line_speed;
1045
1046 /* Link is down */
1047 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1048 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1049 &data->link_report_flags);
1050
1051 /* Full DUPLEX */
1052 if (bp->link_vars.duplex == DUPLEX_FULL)
1053 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1054
1055 /* Rx Flow Control is ON */
1056 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1057 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1058
1059 /* Tx Flow Control is ON */
1060 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1061 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1062}
1063
1064/**
1065 * bnx2x_link_report - report link status to OS.
1066 *
1067 * @bp: driver handle
1068 *
1069 * Calls the __bnx2x_link_report() under the same locking scheme
1070 * as a link/PHY state managing code to ensure a consistent link
1071 * reporting.
1072 */
1073
9f6c9258
DK
1074void bnx2x_link_report(struct bnx2x *bp)
1075{
2ae17f66
VZ
1076 bnx2x_acquire_phy_lock(bp);
1077 __bnx2x_link_report(bp);
1078 bnx2x_release_phy_lock(bp);
1079}
9f6c9258 1080
2ae17f66
VZ
1081/**
1082 * __bnx2x_link_report - report link status to OS.
1083 *
1084 * @bp: driver handle
1085 *
 1086 * Non-atomic implementation.
1087 * Should be called under the phy_lock.
1088 */
1089void __bnx2x_link_report(struct bnx2x *bp)
1090{
1091 struct bnx2x_link_report_data cur_data;
9f6c9258 1092
2ae17f66 1093 /* reread mf_cfg */
ad5afc89 1094 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1095 bnx2x_read_mf_cfg(bp);
1096
1097 /* Read the current link report info */
1098 bnx2x_fill_report_data(bp, &cur_data);
1099
1100 /* Don't report link down or exactly the same link status twice */
1101 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1102 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1103 &bp->last_reported_link.link_report_flags) &&
1104 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1105 &cur_data.link_report_flags)))
1106 return;
1107
1108 bp->link_cnt++;
9f6c9258 1109
2ae17f66
VZ
1110 /* We are going to report a new link parameters now -
1111 * remember the current data for the next time.
1112 */
1113 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1114
2ae17f66
VZ
1115 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1116 &cur_data.link_report_flags)) {
1117 netif_carrier_off(bp->dev);
1118 netdev_err(bp->dev, "NIC Link is Down\n");
1119 return;
1120 } else {
94f05b0f
JP
1121 const char *duplex;
1122 const char *flow;
1123
2ae17f66 1124 netif_carrier_on(bp->dev);
9f6c9258 1125
2ae17f66
VZ
1126 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1127 &cur_data.link_report_flags))
94f05b0f 1128 duplex = "full";
9f6c9258 1129 else
94f05b0f 1130 duplex = "half";
9f6c9258 1131
2ae17f66
VZ
 1132 /* Handle the FC at the end so that only these flags can possibly
 1133 * be set. This way we can easily check whether any flow control
 1134 * is enabled.
1135 */
1136 if (cur_data.link_report_flags) {
1137 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1138 &cur_data.link_report_flags)) {
2ae17f66
VZ
1139 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1140 &cur_data.link_report_flags))
94f05b0f
JP
1141 flow = "ON - receive & transmit";
1142 else
1143 flow = "ON - receive";
9f6c9258 1144 } else {
94f05b0f 1145 flow = "ON - transmit";
9f6c9258 1146 }
94f05b0f
JP
1147 } else {
1148 flow = "none";
9f6c9258 1149 }
94f05b0f
JP
1150 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1151 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1152 }
1153}
1154
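/* Initialize the "next page" element at the end of each SGE ring page
 * so that the hardware follows the pages as one circular ring.
 */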
1191cb83
ED
1155static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1156{
1157 int i;
1158
1159 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1160 struct eth_rx_sge *sge;
1161
1162 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1163 sge->addr_hi =
1164 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1165 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1166
1167 sge->addr_lo =
1168 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1169 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1170 }
1171}
1172
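/* Release the per-queue TPA first buffers: unmap any aggregation still
 * in the START state and free the backing data buffers.
 */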
1173static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1174 struct bnx2x_fastpath *fp, int last)
1175{
1176 int i;
1177
1178 for (i = 0; i < last; i++) {
1179 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1180 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1181 u8 *data = first_buf->data;
1182
1183 if (data == NULL) {
1184 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1185 continue;
1186 }
1187 if (tpa_info->tpa_state == BNX2X_TPA_START)
1188 dma_unmap_single(&bp->pdev->dev,
1189 dma_unmap_addr(first_buf, mapping),
1190 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1191 bnx2x_frag_free(fp, data);
1191cb83
ED
1192 first_buf->data = NULL;
1193 }
1194}
1195
55c11941
MS
1196void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1197{
1198 int j;
1199
1200 for_each_rx_queue_cnic(bp, j) {
1201 struct bnx2x_fastpath *fp = &bp->fp[j];
1202
1203 fp->rx_bd_cons = 0;
1204
1205 /* Activate BD ring */
1206 /* Warning!
1207 * this will generate an interrupt (to the TSTORM)
1208 * must only be done after chip is initialized
1209 */
1210 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1211 fp->rx_sge_prod);
1212 }
1213}
1214
9f6c9258
DK
1215void bnx2x_init_rx_rings(struct bnx2x *bp)
1216{
1217 int func = BP_FUNC(bp);
523224a3 1218 u16 ring_prod;
9f6c9258 1219 int i, j;
25141580 1220
b3b83c3f 1221 /* Allocate TPA resources */
55c11941 1222 for_each_eth_queue(bp, j) {
523224a3 1223 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1224
a8c94b91
VZ
1225 DP(NETIF_MSG_IFUP,
1226 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1227
523224a3 1228 if (!fp->disable_tpa) {
 1229 /* Fill the per-aggregation pool */
dfacf138 1230 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1231 struct bnx2x_agg_info *tpa_info =
1232 &fp->tpa_info[i];
1233 struct sw_rx_bd *first_buf =
1234 &tpa_info->first_buf;
1235
d46d132c 1236 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1237 if (!first_buf->data) {
51c1a580
MS
1238 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1239 j);
9f6c9258
DK
1240 bnx2x_free_tpa_pool(bp, fp, i);
1241 fp->disable_tpa = 1;
1242 break;
1243 }
619c5cb6
VZ
1244 dma_unmap_addr_set(first_buf, mapping, 0);
1245 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1246 }
523224a3
DK
1247
1248 /* "next page" elements initialization */
1249 bnx2x_set_next_page_sgl(fp);
1250
1251 /* set SGEs bit mask */
1252 bnx2x_init_sge_ring_bit_mask(fp);
1253
1254 /* Allocate SGEs and initialize the ring elements */
1255 for (i = 0, ring_prod = 0;
1256 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1257
1258 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1259 BNX2X_ERR("was only able to allocate %d rx sges\n",
1260 i);
1261 BNX2X_ERR("disabling TPA for queue[%d]\n",
1262 j);
523224a3 1263 /* Cleanup already allocated elements */
619c5cb6
VZ
1264 bnx2x_free_rx_sge_range(bp, fp,
1265 ring_prod);
1266 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1267 MAX_AGG_QS(bp));
523224a3
DK
1268 fp->disable_tpa = 1;
1269 ring_prod = 0;
1270 break;
1271 }
1272 ring_prod = NEXT_SGE_IDX(ring_prod);
1273 }
1274
1275 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1276 }
1277 }
1278
55c11941 1279 for_each_eth_queue(bp, j) {
9f6c9258
DK
1280 struct bnx2x_fastpath *fp = &bp->fp[j];
1281
1282 fp->rx_bd_cons = 0;
9f6c9258 1283
b3b83c3f
DK
1284 /* Activate BD ring */
1285 /* Warning!
1286 * this will generate an interrupt (to the TSTORM)
1287 * must only be done after chip is initialized
1288 */
1289 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1290 fp->rx_sge_prod);
9f6c9258 1291
9f6c9258
DK
1292 if (j != 0)
1293 continue;
1294
619c5cb6 1295 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1296 REG_WR(bp, BAR_USTRORM_INTMEM +
1297 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1298 U64_LO(fp->rx_comp_mapping));
1299 REG_WR(bp, BAR_USTRORM_INTMEM +
1300 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1301 U64_HI(fp->rx_comp_mapping));
1302 }
9f6c9258
DK
1303 }
1304}
f85582f8 1305
55c11941 1306static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1307{
6383c0b3 1308 u8 cos;
55c11941 1309 struct bnx2x *bp = fp->bp;
9f6c9258 1310
55c11941
MS
1311 for_each_cos_in_tx_queue(fp, cos) {
1312 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1313 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1314
55c11941
MS
1315 u16 sw_prod = txdata->tx_pkt_prod;
1316 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1317
55c11941
MS
1318 while (sw_cons != sw_prod) {
1319 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1320 &pkts_compl, &bytes_compl);
1321 sw_cons++;
9f6c9258 1322 }
55c11941
MS
1323
1324 netdev_tx_reset_queue(
1325 netdev_get_tx_queue(bp->dev,
1326 txdata->txq_index));
1327 }
1328}
1329
1330static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1331{
1332 int i;
1333
1334 for_each_tx_queue_cnic(bp, i) {
1335 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1336 }
1337}
1338
1339static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1340{
1341 int i;
1342
1343 for_each_eth_queue(bp, i) {
1344 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1345 }
1346}
1347
b3b83c3f
DK
1348static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1349{
1350 struct bnx2x *bp = fp->bp;
1351 int i;
1352
1353 /* ring wasn't allocated */
1354 if (fp->rx_buf_ring == NULL)
1355 return;
1356
1357 for (i = 0; i < NUM_RX_BD; i++) {
1358 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1359 u8 *data = rx_buf->data;
b3b83c3f 1360
e52fcb24 1361 if (data == NULL)
b3b83c3f 1362 continue;
b3b83c3f
DK
1363 dma_unmap_single(&bp->pdev->dev,
1364 dma_unmap_addr(rx_buf, mapping),
1365 fp->rx_buf_size, DMA_FROM_DEVICE);
1366
e52fcb24 1367 rx_buf->data = NULL;
d46d132c 1368 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1369 }
1370}
1371
55c11941
MS
1372static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1373{
1374 int j;
1375
1376 for_each_rx_queue_cnic(bp, j) {
1377 bnx2x_free_rx_bds(&bp->fp[j]);
1378 }
1379}
1380
9f6c9258
DK
1381static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1382{
b3b83c3f 1383 int j;
9f6c9258 1384
55c11941 1385 for_each_eth_queue(bp, j) {
9f6c9258
DK
1386 struct bnx2x_fastpath *fp = &bp->fp[j];
1387
b3b83c3f 1388 bnx2x_free_rx_bds(fp);
9f6c9258 1389
9f6c9258 1390 if (!fp->disable_tpa)
dfacf138 1391 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1392 }
1393}
1394
55c11941
MS
1395void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1396{
1397 bnx2x_free_tx_skbs_cnic(bp);
1398 bnx2x_free_rx_skbs_cnic(bp);
1399}
1400
9f6c9258
DK
1401void bnx2x_free_skbs(struct bnx2x *bp)
1402{
1403 bnx2x_free_tx_skbs(bp);
1404 bnx2x_free_rx_skbs(bp);
1405}
1406
e3835b99
DK
1407void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1408{
1409 /* load old values */
1410 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1411
1412 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1413 /* leave all but MAX value */
1414 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1415
1416 /* set new MAX value */
1417 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1418 & FUNC_MF_CFG_MAX_BW_MASK;
1419
1420 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1421 }
1422}
1423
ca92429f
DK
1424/**
1425 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1426 *
1427 * @bp: driver handle
1428 * @nvecs: number of vectors to be released
1429 */
1430static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1431{
ca92429f 1432 int i, offset = 0;
9f6c9258 1433
ca92429f
DK
1434 if (nvecs == offset)
1435 return;
ad5afc89
AE
1436
1437 /* VFs don't have a default SB */
1438 if (IS_PF(bp)) {
1439 free_irq(bp->msix_table[offset].vector, bp->dev);
1440 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1441 bp->msix_table[offset].vector);
1442 offset++;
1443 }
55c11941
MS
1444
1445 if (CNIC_SUPPORT(bp)) {
1446 if (nvecs == offset)
1447 return;
1448 offset++;
1449 }
ca92429f 1450
ec6ba945 1451 for_each_eth_queue(bp, i) {
ca92429f
DK
1452 if (nvecs == offset)
1453 return;
51c1a580
MS
1454 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1455 i, bp->msix_table[offset].vector);
9f6c9258 1456
ca92429f 1457 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1458 }
1459}
1460
d6214d7a 1461void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1462{
30a5de77 1463 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1464 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1465 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1466
1467 /* vfs don't have a default status block */
1468 if (IS_PF(bp))
1469 nvecs++;
1470
1471 bnx2x_free_msix_irqs(bp, nvecs);
1472 } else {
30a5de77 1473 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1474 }
9f6c9258
DK
1475}
1476
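/* Request MSI-X vectors: one for the slowpath (PF only), one for CNIC
 * if supported and one per ETH queue. On partial allocation fall back
 * to fewer queues or to a single vector; on -ENOMEM fall back to INTx.
 */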
0e8d2ec5 1477int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1478{
1ab4434c 1479 int msix_vec = 0, i, rc;
9f6c9258 1480
1ab4434c
AE
1481 /* VFs don't have a default status block */
1482 if (IS_PF(bp)) {
1483 bp->msix_table[msix_vec].entry = msix_vec;
1484 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1485 bp->msix_table[0].entry);
1486 msix_vec++;
1487 }
9f6c9258 1488
55c11941
MS
1489 /* Cnic requires an msix vector for itself */
1490 if (CNIC_SUPPORT(bp)) {
1491 bp->msix_table[msix_vec].entry = msix_vec;
1492 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1493 msix_vec, bp->msix_table[msix_vec].entry);
1494 msix_vec++;
1495 }
1496
6383c0b3 1497 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1498 for_each_eth_queue(bp, i) {
d6214d7a 1499 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1500 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1501 msix_vec, msix_vec, i);
d6214d7a 1502 msix_vec++;
9f6c9258
DK
1503 }
1504
1ab4434c
AE
1505 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1506 msix_vec);
d6214d7a 1507
1ab4434c 1508 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1509
1510 /*
1511 * reconfigure number of tx/rx queues according to available
1512 * MSI-X vectors
1513 */
55c11941 1514 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
 1515 /* how many fewer vectors will we have? */
1ab4434c 1516 int diff = msix_vec - rc;
9f6c9258 1517
51c1a580 1518 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1519
1520 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1521
1522 if (rc) {
30a5de77
DK
1523 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1524 goto no_msix;
9f6c9258 1525 }
d6214d7a
DK
1526 /*
1527 * decrease number of queues by number of unallocated entries
1528 */
55c11941
MS
1529 bp->num_ethernet_queues -= diff;
1530 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1531
51c1a580 1532 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1533 bp->num_queues);
1534 } else if (rc > 0) {
1535 /* Get by with single vector */
1536 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1537 if (rc) {
1538 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1539 rc);
1540 goto no_msix;
1541 }
1542
1543 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1544 bp->flags |= USING_SINGLE_MSIX_FLAG;
1545
55c11941
MS
1546 BNX2X_DEV_INFO("set number of queues to 1\n");
1547 bp->num_ethernet_queues = 1;
1548 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1549 } else if (rc < 0) {
51c1a580 1550 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1551 goto no_msix;
9f6c9258
DK
1552 }
1553
1554 bp->flags |= USING_MSIX_FLAG;
1555
1556 return 0;
30a5de77
DK
1557
1558no_msix:
1559 /* fall to INTx if not enough memory */
1560 if (rc == -ENOMEM)
1561 bp->flags |= DISABLE_MSI_FLAG;
1562
1563 return rc;
9f6c9258
DK
1564}
1565
1566static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1567{
ca92429f 1568 int i, rc, offset = 0;
9f6c9258 1569
ad5afc89
AE
1570 /* no default status block for vf */
1571 if (IS_PF(bp)) {
1572 rc = request_irq(bp->msix_table[offset++].vector,
1573 bnx2x_msix_sp_int, 0,
1574 bp->dev->name, bp->dev);
1575 if (rc) {
1576 BNX2X_ERR("request sp irq failed\n");
1577 return -EBUSY;
1578 }
9f6c9258
DK
1579 }
1580
55c11941
MS
1581 if (CNIC_SUPPORT(bp))
1582 offset++;
1583
ec6ba945 1584 for_each_eth_queue(bp, i) {
9f6c9258
DK
1585 struct bnx2x_fastpath *fp = &bp->fp[i];
1586 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1587 bp->dev->name, i);
1588
d6214d7a 1589 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1590 bnx2x_msix_fp_int, 0, fp->name, fp);
1591 if (rc) {
ca92429f
DK
1592 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1593 bp->msix_table[offset].vector, rc);
1594 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1595 return -EBUSY;
1596 }
1597
d6214d7a 1598 offset++;
9f6c9258
DK
1599 }
1600
ec6ba945 1601 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1602 if (IS_PF(bp)) {
1603 offset = 1 + CNIC_SUPPORT(bp);
1604 netdev_info(bp->dev,
1605 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1606 bp->msix_table[0].vector,
1607 0, bp->msix_table[offset].vector,
1608 i - 1, bp->msix_table[offset + i - 1].vector);
1609 } else {
1610 offset = CNIC_SUPPORT(bp);
1611 netdev_info(bp->dev,
1612 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1613 0, bp->msix_table[offset].vector,
1614 i - 1, bp->msix_table[offset + i - 1].vector);
1615 }
9f6c9258
DK
1616 return 0;
1617}
1618
d6214d7a 1619int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1620{
1621 int rc;
1622
1623 rc = pci_enable_msi(bp->pdev);
1624 if (rc) {
51c1a580 1625 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1626 return -1;
1627 }
1628 bp->flags |= USING_MSI_FLAG;
1629
1630 return 0;
1631}
1632
1633static int bnx2x_req_irq(struct bnx2x *bp)
1634{
1635 unsigned long flags;
30a5de77 1636 unsigned int irq;
9f6c9258 1637
30a5de77 1638 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1639 flags = 0;
1640 else
1641 flags = IRQF_SHARED;
1642
30a5de77
DK
1643 if (bp->flags & USING_MSIX_FLAG)
1644 irq = bp->msix_table[0].vector;
1645 else
1646 irq = bp->pdev->irq;
1647
1648 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1649}
1650
1191cb83 1651static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1652{
1653 int rc = 0;
30a5de77
DK
1654 if (bp->flags & USING_MSIX_FLAG &&
1655 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1656 rc = bnx2x_req_msix_irqs(bp);
1657 if (rc)
1658 return rc;
1659 } else {
1660 bnx2x_ack_int(bp);
1661 rc = bnx2x_req_irq(bp);
1662 if (rc) {
1663 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1664 return rc;
1665 }
1666 if (bp->flags & USING_MSI_FLAG) {
1667 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1668 netdev_info(bp->dev, "using MSI IRQ %d\n",
1669 bp->dev->irq);
1670 }
1671 if (bp->flags & USING_MSIX_FLAG) {
1672 bp->dev->irq = bp->msix_table[0].vector;
1673 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1674 bp->dev->irq);
619c5cb6
VZ
1675 }
1676 }
1677
1678 return 0;
1679}
1680
55c11941
MS
1681static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1682{
1683 int i;
1684
1685 for_each_rx_queue_cnic(bp, i)
1686 napi_enable(&bnx2x_fp(bp, i, napi));
1687}
1688
1191cb83 1689static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1690{
1691 int i;
1692
55c11941 1693 for_each_eth_queue(bp, i)
9f6c9258
DK
1694 napi_enable(&bnx2x_fp(bp, i, napi));
1695}
1696
55c11941
MS
1697static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1698{
1699 int i;
1700
1701 for_each_rx_queue_cnic(bp, i)
1702 napi_disable(&bnx2x_fp(bp, i, napi));
1703}
1704
1191cb83 1705static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1706{
1707 int i;
1708
55c11941 1709 for_each_eth_queue(bp, i)
9f6c9258
DK
1710 napi_disable(&bnx2x_fp(bp, i, napi));
1711}
1712
1713void bnx2x_netif_start(struct bnx2x *bp)
1714{
4b7ed897
DK
1715 if (netif_running(bp->dev)) {
1716 bnx2x_napi_enable(bp);
55c11941
MS
1717 if (CNIC_LOADED(bp))
1718 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1719 bnx2x_int_enable(bp);
1720 if (bp->state == BNX2X_STATE_OPEN)
1721 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1722 }
1723}
1724
1725void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1726{
1727 bnx2x_int_disable_sync(bp, disable_hw);
1728 bnx2x_napi_disable(bp);
55c11941
MS
1729 if (CNIC_LOADED(bp))
1730 bnx2x_napi_disable_cnic(bp);
9f6c9258 1731}
9f6c9258 1732
8307fa3e
VZ
1733u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1734{
8307fa3e 1735 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1736
55c11941 1737 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1738 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1739 u16 ether_type = ntohs(hdr->h_proto);
1740
1741 /* Skip VLAN tag if present */
1742 if (ether_type == ETH_P_8021Q) {
1743 struct vlan_ethhdr *vhdr =
1744 (struct vlan_ethhdr *)skb->data;
1745
1746 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1747 }
1748
1749 /* If ethertype is FCoE or FIP - use FCoE ring */
1750 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1751 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1752 }
55c11941 1753
cdb9d6ae 1754 /* select a non-FCoE queue */
6383c0b3 1755 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1756}
1757
96305234 1758
d6214d7a
DK
1759void bnx2x_set_num_queues(struct bnx2x *bp)
1760{
96305234 1761 /* RSS queues */
55c11941 1762 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1763
a3348722
BW
1764 /* override in STORAGE SD modes */
1765 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1766 bp->num_ethernet_queues = 1;
1767
ec6ba945 1768 /* Add special queues */
55c11941
MS
1769 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1770 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1771
1772 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1773}
1774
cdb9d6ae
VZ
1775/**
1776 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1777 *
1778 * @bp: Driver handle
1779 *
 1780 * We currently support at most 16 Tx queues for each CoS, thus we will
1781 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1782 * bp->max_cos.
1783 *
1784 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1785 * index after all ETH L2 indices.
1786 *
1787 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1788 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1789 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1790 *
1791 * The proper configuration of skb->queue_mapping is handled by
1792 * bnx2x_select_queue() and __skb_tx_hash().
1793 *
1794 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1795 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1796 */
55c11941 1797static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1798{
6383c0b3 1799 int rc, tx, rx;
ec6ba945 1800
65565884 1801 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1802 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1803
6383c0b3 1804/* account for fcoe queue */
55c11941
MS
1805 if (include_cnic && !NO_FCOE(bp)) {
1806 rx++;
1807 tx++;
6383c0b3 1808 }
6383c0b3
AE
1809
1810 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1811 if (rc) {
1812 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1813 return rc;
1814 }
1815 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1816 if (rc) {
1817 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1818 return rc;
1819 }
1820
51c1a580 1821 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1822 tx, rx);
1823
ec6ba945
VZ
1824 return rc;
1825}
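/* Illustrative sketch (numbers assumed, not taken from the driver): with
 * 4 ETH queues, bp->max_cos = 3 and an FCoE L2 queue included, the function
 * above would request
 *
 *	tx = 4 * 3 + 1 = 13	(ETH rings for every CoS plus the FCoE Tx ring)
 *	rx = 4 + 1 = 5		(ETH Rx rings plus the FCoE Rx ring)
 *
 * with the FCoE Tx index placed right after all ETH L2 indices, as described
 * in the comment above bnx2x_set_real_num_queues().
 */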
1826
1191cb83 1827static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1828{
1829 int i;
1830
1831 for_each_queue(bp, i) {
1832 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1833 u32 mtu;
a8c94b91
VZ
1834
1835 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1836 if (IS_FCOE_IDX(i))
1837 /*
1838 * Although there are no IP frames expected to arrive on
1839 * this ring, we still want to add an
1840 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1841 * overrun attack.
1842 */
e52fcb24 1843 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1844 else
e52fcb24
ED
1845 mtu = bp->dev->mtu;
1846 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1847 IP_HEADER_ALIGNMENT_PADDING +
1848 ETH_OVREHEAD +
1849 mtu +
1850 BNX2X_FW_RX_ALIGN_END;
1851 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1852 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1853 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1854 else
1855 fp->rx_frag_size = 0;
a8c94b91
VZ
1856 }
1857}
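/* Illustrative example (MTU value assumed): for a regular ETH ring with a
 * 1500 byte MTU the computation above yields
 *
 *	fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + IP_HEADER_ALIGNMENT_PADDING +
 *			  ETH_OVREHEAD + 1500 + BNX2X_FW_RX_ALIGN_END;
 *
 * and rx_frag_size is only used when that value plus NET_SKB_PAD still fits
 * in a single page; otherwise fp->rx_frag_size is left at 0.
 */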
1858
1191cb83 1859static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1860{
1861 int i;
619c5cb6
VZ
1862 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1863
96305234 1864 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1865 * enabled
1866 */
5d317c6a
MS
1867 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1868 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1869 bp->fp->cl_id +
1870 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1871
1872 /*
1873 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1874 * per-port, so if explicit configuration is needed, do it only
1875 * for a PMF.
1876 *
1877 * For 57712 and newer on the other hand it's a per-function
1878 * configuration.
1879 */
5d317c6a 1880 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1881}
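/* Illustrative note (cl_id value assumed): with 4 ETH queues and
 * bp->fp->cl_id == 16, the loop above fills the indirection table
 * round-robin over the queues' client IDs:
 *
 *	ind_table[0..7] = 16, 17, 18, 19, 16, 17, 18, 19, ...
 *
 * i.e. cl_id + ethtool_rxfh_indir_default(i, num_eth_queues).
 */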
1882
96305234 1883int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1884 bool config_hash)
619c5cb6 1885{
3b603066 1886 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1887
1888 /* Although RSS is meaningless when there is a single HW queue, we
1889 * still need it enabled in order to have HW Rx hash generated.
1890 *
1891 * if (!is_eth_multi(bp))
1892 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1893 */
1894
96305234 1895 params.rss_obj = rss_obj;
619c5cb6
VZ
1896
1897 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1898
96305234 1899 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1900
96305234
DK
1901 /* RSS configuration */
1902 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1903 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1904 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1905 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1906 if (rss_obj->udp_rss_v4)
1907 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1908 if (rss_obj->udp_rss_v6)
1909 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1910
96305234
DK
1911 /* Hash bits */
1912 params.rss_result_mask = MULTI_MASK;
619c5cb6 1913
5d317c6a 1914 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1915
96305234
DK
1916 if (config_hash) {
1917 /* RSS keys */
8376d0bc 1918 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1919 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1920 }
1921
1922 return bnx2x_config_rss(bp, &params);
1923}
1924
1191cb83 1925static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1926{
3b603066 1927 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1928
1929 /* Prepare parameters for function state transitions */
1930 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1931
1932 func_params.f_obj = &bp->func_obj;
1933 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1934
1935 func_params.params.hw_init.load_phase = load_code;
1936
1937 return bnx2x_func_state_change(bp, &func_params);
1938}
1939
1940/*
1941 * Cleans the objects that have internal lists without sending
1942 * ramrods. Should be run when interrupts are disabled.
1943 */
1944static void bnx2x_squeeze_objects(struct bnx2x *bp)
1945{
1946 int rc;
1947 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 1948 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 1949 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
1950
1951 /***************** Cleanup MACs' object first *************************/
1952
1953 /* Wait for completion of requested commands */
1954 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1955 /* Perform a dry cleanup */
1956 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1957
1958 /* Clean ETH primary MAC */
1959 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 1960 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
1961 &ramrod_flags);
1962 if (rc != 0)
1963 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1964
1965 /* Cleanup UC list */
1966 vlan_mac_flags = 0;
1967 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1968 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1969 &ramrod_flags);
1970 if (rc != 0)
1971 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1972
1973 /***************** Now clean mcast object *****************************/
1974 rparam.mcast_obj = &bp->mcast_obj;
1975 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1976
1977 /* Add a DEL command... */
1978 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1979 if (rc < 0)
51c1a580
MS
1980 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1981 rc);
619c5cb6
VZ
1982
1983 /* ...and wait until all pending commands are cleared */
1984 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1985 while (rc != 0) {
1986 if (rc < 0) {
1987 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1988 rc);
1989 return;
1990 }
1991
1992 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1993 }
1994}
1995
1996#ifndef BNX2X_STOP_ON_ERROR
1997#define LOAD_ERROR_EXIT(bp, label) \
1998 do { \
1999 (bp)->state = BNX2X_STATE_ERROR; \
2000 goto label; \
2001 } while (0)
55c11941
MS
2002
2003#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2004 do { \
2005 bp->cnic_loaded = false; \
2006 goto label; \
2007 } while (0)
2008#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2009#define LOAD_ERROR_EXIT(bp, label) \
2010 do { \
2011 (bp)->state = BNX2X_STATE_ERROR; \
2012 (bp)->panic = 1; \
2013 return -EBUSY; \
2014 } while (0)
55c11941
MS
2015#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2016 do { \
2017 bp->cnic_loaded = false; \
2018 (bp)->panic = 1; \
2019 return -EBUSY; \
2020 } while (0)
2021#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2022
ad5afc89
AE
2023static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2024{
2025 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2026 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2027 return;
2028}
2029
2030static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2031{
8db573ba 2032 int num_groups, vf_headroom = 0;
ad5afc89 2033 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2034
ad5afc89
AE
2035 /* number of queues for statistics is number of eth queues + FCoE */
2036 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2037
ad5afc89
AE
2038 /* Total number of FW statistics requests =
2039 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2040 * and fcoe l2 queue) stats + num of queues (which includes another 1
2041 * for fcoe l2 queue if applicable)
2042 */
2043 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2044
8db573ba
AE
2045 /* vf stats appear in the request list, but their data is allocated by
2046 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2047 * it is used to determine where to place the vf stats queries in the
2048 * request struct
2049 */
2050 if (IS_SRIOV(bp))
2051 vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
2052
ad5afc89
AE
2053 /* Request is built from stats_query_header and an array of
2054 * stats_query_cmd_group each of which contains
2055 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2056 * configured in the stats_query_header.
2057 */
2058 num_groups =
8db573ba
AE
2059 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2060 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2061 1 : 0));
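	/* Descriptive note: this is a ceiling division, equivalent to
	 * DIV_ROUND_UP(bp->fw_stats_num + vf_headroom, STATS_QUERY_CMD_COUNT).
	 */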
2062
8db573ba
AE
2063 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2064 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2065 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2066 num_groups * sizeof(struct stats_query_cmd_group);
2067
2068 /* Data for statistics requests + stats_counter
2069 * stats_counter holds per-STORM counters that are incremented
2070 * when STORM has finished with the current request.
2071 * memory for FCoE offloaded statistics is counted anyway,
2072 * even if they will not be sent.
2073 * VF stats are not accounted for here as the data of VF stats is stored
2074 * in memory allocated by the VF, not here.
2075 */
2076 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2077 sizeof(struct per_pf_stats) +
2078 sizeof(struct fcoe_statistics_params) +
2079 sizeof(struct per_queue_stats) * num_queue_stats +
2080 sizeof(struct stats_counter);
2081
2082 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2083 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2084
2085 /* Set shortcuts */
2086 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2087 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2088 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2089 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2090 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2091 bp->fw_stats_req_sz;
2092
2093 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2094 U64_HI(bp->fw_stats_req_mapping),
2095 U64_LO(bp->fw_stats_req_mapping));
2096 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2097 U64_HI(bp->fw_stats_data_mapping),
2098 U64_LO(bp->fw_stats_data_mapping));
2099 return 0;
2100
2101alloc_mem_err:
2102 bnx2x_free_fw_stats_mem(bp);
2103 BNX2X_ERR("Can't allocate FW stats memory\n");
2104 return -ENOMEM;
2105}
2106
2107/* send load request to mcp and analyze response */
2108static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2109{
2110 /* init fw_seq */
2111 bp->fw_seq =
2112 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2113 DRV_MSG_SEQ_NUMBER_MASK);
2114 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2115
2116 /* Get current FW pulse sequence */
2117 bp->fw_drv_pulse_wr_seq =
2118 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2119 DRV_PULSE_SEQ_MASK);
2120 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2121
2122 /* load request */
2123 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2124 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2125
2126 /* if mcp fails to respond we must abort */
2127 if (!(*load_code)) {
2128 BNX2X_ERR("MCP response failure, aborting\n");
2129 return -EBUSY;
2130 }
2131
2132 /* If mcp refused (e.g. other port is in diagnostic mode) we
2133 * must abort
2134 */
2135 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2136 BNX2X_ERR("MCP refused load request, aborting\n");
2137 return -EBUSY;
2138 }
2139 return 0;
2140}
2141
2142/* check whether another PF has already loaded FW to chip. In
2143 * virtualized environments a pf from another VM may have already
2144 * initialized the device including loading FW
2145 */
2146int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2147{
2148 /* is another pf loaded on this engine? */
2149 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2150 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2151 /* build my FW version dword */
2152 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2153 (BCM_5710_FW_MINOR_VERSION << 8) +
2154 (BCM_5710_FW_REVISION_VERSION << 16) +
2155 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2156
2157 /* read loaded FW from chip */
2158 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2159
2160 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2161 loaded_fw, my_fw);
2162
2163 /* abort nic load if version mismatch */
2164 if (my_fw != loaded_fw) {
2165 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2166 loaded_fw, my_fw);
ad5afc89
AE
2167 return -EBUSY;
2168 }
2169 }
2170 return 0;
2171}
2172
2173/* returns the "mcp load_code" according to global load_count array */
2174static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2175{
2176 int path = BP_PATH(bp);
2177
2178 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2179 path, load_count[path][0], load_count[path][1],
2180 load_count[path][2]);
2181 load_count[path][0]++;
2182 load_count[path][1 + port]++;
2183 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2184 path, load_count[path][0], load_count[path][1],
2185 load_count[path][2]);
2186 if (load_count[path][0] == 1)
2187 return FW_MSG_CODE_DRV_LOAD_COMMON;
2188 else if (load_count[path][1 + port] == 1)
2189 return FW_MSG_CODE_DRV_LOAD_PORT;
2190 else
2191 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2192}
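/* Descriptive note: load_count[path][0] counts all functions loaded on this
 * path and load_count[path][1 + port] those loaded on the given port, so the
 * first function on the path answers itself LOAD_COMMON, the first one on a
 * port LOAD_PORT and every other function LOAD_FUNCTION - mirroring what the
 * MCP would have returned.
 */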
2193
2194/* mark PMF if applicable */
2195static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2196{
2197 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2198 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2199 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2200 bp->port.pmf = 1;
2201 /* We need the barrier to ensure the ordering between the
2202 * writing to bp->port.pmf here and reading it from the
2203 * bnx2x_periodic_task().
2204 */
2205 smp_mb();
2206 } else {
2207 bp->port.pmf = 0;
452427b0
YM
2208 }
2209
ad5afc89
AE
2210 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2211}
2212
2213static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2214{
2215 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2216 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2217 (bp->common.shmem2_base)) {
2218 if (SHMEM2_HAS(bp, dcc_support))
2219 SHMEM2_WR(bp, dcc_support,
2220 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2221 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2222 if (SHMEM2_HAS(bp, afex_driver_support))
2223 SHMEM2_WR(bp, afex_driver_support,
2224 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2225 }
2226
2227 /* Set AFEX default VLAN tag to an invalid value */
2228 bp->afex_def_vlan_tag = -1;
452427b0
YM
2229}
2230
1191cb83
ED
2231/**
2232 * bnx2x_bz_fp - zero content of the fastpath structure.
2233 *
2234 * @bp: driver handle
2235 * @index: fastpath index to be zeroed
2236 *
2237 * Makes sure the contents of the bp->fp[index].napi is kept
2238 * intact.
2239 */
2240static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2241{
2242 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c
BW
2243 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2244
65565884 2245 int cos;
1191cb83 2246 struct napi_struct orig_napi = fp->napi;
15192a8c 2247 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2248 /* bzero bnx2x_fastpath contents */
15192a8c
BW
2249 if (bp->stats_init) {
2250 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83 2251 memset(fp, 0, sizeof(*fp));
15192a8c 2252 } else {
1191cb83
ED
2253 /* Keep Queue statistics */
2254 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2255 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2256
2257 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2258 GFP_KERNEL);
2259 if (tmp_eth_q_stats)
15192a8c 2260 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1191cb83
ED
2261 sizeof(struct bnx2x_eth_q_stats));
2262
2263 tmp_eth_q_stats_old =
2264 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2265 GFP_KERNEL);
2266 if (tmp_eth_q_stats_old)
15192a8c 2267 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1191cb83
ED
2268 sizeof(struct bnx2x_eth_q_stats_old));
2269
15192a8c 2270 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83
ED
2271 memset(fp, 0, sizeof(*fp));
2272
2273 if (tmp_eth_q_stats) {
15192a8c
BW
2274 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2275 sizeof(struct bnx2x_eth_q_stats));
1191cb83
ED
2276 kfree(tmp_eth_q_stats);
2277 }
2278
2279 if (tmp_eth_q_stats_old) {
15192a8c 2280 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1191cb83
ED
2281 sizeof(struct bnx2x_eth_q_stats_old));
2282 kfree(tmp_eth_q_stats_old);
2283 }
2284
2285 }
2286
2287 /* Restore the NAPI object as it has been already initialized */
2288 fp->napi = orig_napi;
15192a8c 2289 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2290 fp->bp = bp;
2291 fp->index = index;
2292 if (IS_ETH_FP(fp))
2293 fp->max_cos = bp->max_cos;
2294 else
2295 /* Special queues support only one CoS */
2296 fp->max_cos = 1;
2297
65565884 2298 /* Init txdata pointers */
65565884
MS
2299 if (IS_FCOE_FP(fp))
2300 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2301 if (IS_ETH_FP(fp))
2302 for_each_cos_in_tx_queue(fp, cos)
2303 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2304 BNX2X_NUM_ETH_QUEUES(bp) + index];
2305
1191cb83
ED
2306 /*
2307 * set the tpa flag for each queue. The tpa flag determines the queue
2308 * minimal size so it must be set prior to queue memory allocation
2309 */
2310 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2311 (bp->flags & GRO_ENABLE_FLAG &&
2312 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2313 if (bp->flags & TPA_ENABLE_FLAG)
2314 fp->mode = TPA_MODE_LRO;
2315 else if (bp->flags & GRO_ENABLE_FLAG)
2316 fp->mode = TPA_MODE_GRO;
2317
1191cb83
ED
2318 /* We don't want TPA on an FCoE L2 ring */
2319 if (IS_FCOE_FP(fp))
2320 fp->disable_tpa = 1;
55c11941
MS
2321}
2322
2323int bnx2x_load_cnic(struct bnx2x *bp)
2324{
2325 int i, rc, port = BP_PORT(bp);
2326
2327 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2328
2329 mutex_init(&bp->cnic_mutex);
2330
ad5afc89
AE
2331 if (IS_PF(bp)) {
2332 rc = bnx2x_alloc_mem_cnic(bp);
2333 if (rc) {
2334 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2335 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2336 }
55c11941
MS
2337 }
2338
2339 rc = bnx2x_alloc_fp_mem_cnic(bp);
2340 if (rc) {
2341 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2342 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2343 }
2344
2345 /* Update the number of queues with the cnic queues */
2346 rc = bnx2x_set_real_num_queues(bp, 1);
2347 if (rc) {
2348 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2349 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2350 }
2351
2352 /* Add all CNIC NAPI objects */
2353 bnx2x_add_all_napi_cnic(bp);
2354 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2355 bnx2x_napi_enable_cnic(bp);
2356
2357 rc = bnx2x_init_hw_func_cnic(bp);
2358 if (rc)
2359 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2360
2361 bnx2x_nic_init_cnic(bp);
2362
ad5afc89
AE
2363 if (IS_PF(bp)) {
2364 /* Enable Timer scan */
2365 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2366
2367 /* setup cnic queues */
2368 for_each_cnic_queue(bp, i) {
2369 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2370 if (rc) {
2371 BNX2X_ERR("Queue setup failed\n");
2372 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2373 }
55c11941
MS
2374 }
2375 }
2376
2377 /* Initialize Rx filter. */
2378 netif_addr_lock_bh(bp->dev);
2379 bnx2x_set_rx_mode(bp->dev);
2380 netif_addr_unlock_bh(bp->dev);
2381
2382 /* re-read iscsi info */
2383 bnx2x_get_iscsi_info(bp);
2384 bnx2x_setup_cnic_irq_info(bp);
2385 bnx2x_setup_cnic_info(bp);
2386 bp->cnic_loaded = true;
2387 if (bp->state == BNX2X_STATE_OPEN)
2388 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2389
2390
2391 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2392
2393 return 0;
2394
2395#ifndef BNX2X_STOP_ON_ERROR
2396load_error_cnic2:
2397 /* Disable Timer scan */
2398 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2399
2400load_error_cnic1:
2401 bnx2x_napi_disable_cnic(bp);
2402 /* Update the number of queues without the cnic queues */
2403 rc = bnx2x_set_real_num_queues(bp, 0);
2404 if (rc)
2405 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2406load_error_cnic0:
2407 BNX2X_ERR("CNIC-related load failed\n");
2408 bnx2x_free_fp_mem_cnic(bp);
2409 bnx2x_free_mem_cnic(bp);
2410 return rc;
2411#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2412}
2413
2414
9f6c9258
DK
2415/* must be called with rtnl_lock */
2416int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2417{
619c5cb6 2418 int port = BP_PORT(bp);
ad5afc89 2419 int i, rc = 0, load_code = 0;
9f6c9258 2420
55c11941
MS
2421 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2422 DP(NETIF_MSG_IFUP,
2423 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2424
9f6c9258 2425#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2426 if (unlikely(bp->panic)) {
2427 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2428 return -EPERM;
51c1a580 2429 }
9f6c9258
DK
2430#endif
2431
2432 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2433
2ae17f66
VZ
2434 /* Set the initial link reported state to link down */
2435 bnx2x_acquire_phy_lock(bp);
2436 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2437 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2438 &bp->last_reported_link.link_report_flags);
2439 bnx2x_release_phy_lock(bp);
2440
ad5afc89
AE
2441 if (IS_PF(bp))
2442 /* must be called before memory allocation and HW init */
2443 bnx2x_ilt_set_info(bp);
523224a3 2444
6383c0b3
AE
2445 /*
2446 * Zero fastpath structures, preserving the invariants that are set up only
2447 * once: the napi struct, fp index, max_cos and the bp pointer.
65565884 2448 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2449 */
51c1a580 2450 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2451 for_each_queue(bp, i)
2452 bnx2x_bz_fp(bp, i);
55c11941
MS
2453 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2454 bp->num_cnic_queues) *
2455 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2456
55c11941 2457 bp->fcoe_init = false;
6383c0b3 2458
a8c94b91
VZ
2459 /* Set the receive queues buffer size */
2460 bnx2x_set_rx_buf_size(bp);
2461
ad5afc89
AE
2462 if (IS_PF(bp)) {
2463 rc = bnx2x_alloc_mem(bp);
2464 if (rc) {
2465 BNX2X_ERR("Unable to allocate bp memory\n");
2466 return rc;
2467 }
2468 }
2469
2470 /* Allocate memory for FW statistics */
2471 if (bnx2x_alloc_fw_stats_mem(bp))
2472 LOAD_ERROR_EXIT(bp, load_error0);
2473
2474 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2475 * of memory available for RSS queues
2476 */
2477 rc = bnx2x_alloc_fp_mem(bp);
2478 if (rc) {
2479 BNX2X_ERR("Unable to allocate memory for fps\n");
2480 LOAD_ERROR_EXIT(bp, load_error0);
2481 }
d6214d7a 2482
8d9ac297
AE
2483 /* request pf to initialize status blocks */
2484 if (IS_VF(bp)) {
2485 rc = bnx2x_vfpf_init(bp);
2486 if (rc)
2487 LOAD_ERROR_EXIT(bp, load_error0);
2488 }
2489
b3b83c3f
DK
2490 /* As long as bnx2x_alloc_mem() may possibly update
2491 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2492 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2493 */
55c11941 2494 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2495 if (rc) {
ec6ba945 2496 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2497 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2498 }
2499
6383c0b3
AE
2500 /* configure multi cos mappings in kernel.
2501 * this configuration may be overridden by a multi-class queue discipline
2502 * or by a dcbx negotiation result.
2503 */
2504 bnx2x_setup_tc(bp->dev, bp->max_cos);
2505
26614ba5
MS
2506 /* Add all NAPI objects */
2507 bnx2x_add_all_napi(bp);
55c11941 2508 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2509 bnx2x_napi_enable(bp);
2510
ad5afc89
AE
2511 if (IS_PF(bp)) {
2512 /* set pf load just before approaching the MCP */
2513 bnx2x_set_pf_load(bp);
2514
2515 /* if mcp exists send load request and analyze response */
2516 if (!BP_NOMCP(bp)) {
2517 /* attempt to load pf */
2518 rc = bnx2x_nic_load_request(bp, &load_code);
2519 if (rc)
2520 LOAD_ERROR_EXIT(bp, load_error1);
2521
2522 /* what did mcp say? */
2523 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2524 if (rc) {
2525 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2526 LOAD_ERROR_EXIT(bp, load_error2);
2527 }
ad5afc89
AE
2528 } else {
2529 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2530 }
9f6c9258 2531
ad5afc89
AE
2532 /* mark pmf if applicable */
2533 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2534
ad5afc89
AE
2535 /* Init Function state controlling object */
2536 bnx2x__init_func_obj(bp);
6383c0b3 2537
ad5afc89
AE
2538 /* Initialize HW */
2539 rc = bnx2x_init_hw(bp, load_code);
2540 if (rc) {
2541 BNX2X_ERR("HW init failed, aborting\n");
2542 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2543 LOAD_ERROR_EXIT(bp, load_error2);
2544 }
9f6c9258
DK
2545 }
2546
d6214d7a
DK
2547 /* Connect to IRQs */
2548 rc = bnx2x_setup_irqs(bp);
523224a3 2549 if (rc) {
ad5afc89
AE
2550 BNX2X_ERR("setup irqs failed\n");
2551 if (IS_PF(bp))
2552 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2553 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2554 }
2555
9f6c9258
DK
2556 /* Setup NIC internals and enable interrupts */
2557 bnx2x_nic_init(bp, load_code);
2558
619c5cb6 2559 /* Init per-function objects */
ad5afc89
AE
2560 if (IS_PF(bp)) {
2561 bnx2x_init_bp_objs(bp);
b56e9670 2562 bnx2x_iov_nic_init(bp);
a3348722 2563
ad5afc89
AE
2564 /* Set AFEX default VLAN tag to an invalid value */
2565 bp->afex_def_vlan_tag = -1;
2566 bnx2x_nic_load_afex_dcc(bp, load_code);
2567 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2568 rc = bnx2x_func_start(bp);
2569 if (rc) {
2570 BNX2X_ERR("Function start failed!\n");
2571 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2572
619c5cb6 2573 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2574 }
9f6c9258 2575
ad5afc89
AE
2576 /* Send LOAD_DONE command to MCP */
2577 if (!BP_NOMCP(bp)) {
2578 load_code = bnx2x_fw_command(bp,
2579 DRV_MSG_CODE_LOAD_DONE, 0);
2580 if (!load_code) {
2581 BNX2X_ERR("MCP response failure, aborting\n");
2582 rc = -EBUSY;
2583 LOAD_ERROR_EXIT(bp, load_error3);
2584 }
2585 }
9f6c9258 2586
ad5afc89
AE
2587 /* setup the leading queue */
2588 rc = bnx2x_setup_leading(bp);
51c1a580 2589 if (rc) {
ad5afc89 2590 BNX2X_ERR("Setup leading failed!\n");
55c11941 2591 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2592 }
523224a3 2593
ad5afc89
AE
2594 /* set up the rest of the queues */
2595 for_each_nondefault_eth_queue(bp, i) {
2596 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2597 if (rc) {
2598 BNX2X_ERR("Queue setup failed\n");
2599 LOAD_ERROR_EXIT(bp, load_error3);
2600 }
2601 }
2602
2603 /* setup rss */
2604 rc = bnx2x_init_rss_pf(bp);
2605 if (rc) {
2606 BNX2X_ERR("PF RSS init failed\n");
2607 LOAD_ERROR_EXIT(bp, load_error3);
2608 }
8d9ac297
AE
2609
2610 } else { /* vf */
2611 for_each_eth_queue(bp, i) {
2612 rc = bnx2x_vfpf_setup_q(bp, i);
2613 if (rc) {
2614 BNX2X_ERR("Queue setup failed\n");
2615 LOAD_ERROR_EXIT(bp, load_error3);
2616 }
2617 }
51c1a580 2618 }
619c5cb6 2619
523224a3
DK
2620 /* Now when Clients are configured we are ready to work */
2621 bp->state = BNX2X_STATE_OPEN;
2622
619c5cb6 2623 /* Configure a ucast MAC */
ad5afc89
AE
2624 if (IS_PF(bp))
2625 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297
AE
2626 else /* vf */
2627 rc = bnx2x_vfpf_set_mac(bp);
51c1a580
MS
2628 if (rc) {
2629 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2630 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2631 }
6e30dd4e 2632
ad5afc89 2633 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2634 bnx2x_update_max_mf_config(bp, bp->pending_max);
2635 bp->pending_max = 0;
2636 }
2637
ad5afc89
AE
2638 if (bp->port.pmf) {
2639 rc = bnx2x_initial_phy_init(bp, load_mode);
2640 if (rc)
2641 LOAD_ERROR_EXIT(bp, load_error3);
2642 }
c63da990 2643 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2644
619c5cb6
VZ
2645 /* Start fast path */
2646
2647 /* Initialize Rx filter. */
2648 netif_addr_lock_bh(bp->dev);
6e30dd4e 2649 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2650 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2651
619c5cb6 2652 /* Start the Tx */
9f6c9258
DK
2653 switch (load_mode) {
2654 case LOAD_NORMAL:
523224a3
DK
2655 /* Tx queue should only be re-enabled */
2656 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2657 break;
2658
2659 case LOAD_OPEN:
2660 netif_tx_start_all_queues(bp->dev);
523224a3 2661 smp_mb__after_clear_bit();
9f6c9258
DK
2662 break;
2663
2664 case LOAD_DIAG:
8970b2e4 2665 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2666 bp->state = BNX2X_STATE_DIAG;
2667 break;
2668
2669 default:
2670 break;
2671 }
2672
00253a8c 2673 if (bp->port.pmf)
4c704899 2674 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2675 else
9f6c9258
DK
2676 bnx2x__link_status_update(bp);
2677
2678 /* start the timer */
2679 mod_timer(&bp->timer, jiffies + bp->current_interval);
2680
55c11941
MS
2681 if (CNIC_ENABLED(bp))
2682 bnx2x_load_cnic(bp);
9f6c9258 2683
ad5afc89
AE
2684 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2685 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2686 u32 val;
2687 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2688 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2689 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2690 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2691 }
2692
619c5cb6 2693 /* Wait for all pending SP commands to complete */
ad5afc89 2694 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2695 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2696 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2697 return -EBUSY;
2698 }
6891dd25 2699
9876879f
BW
2700 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2701 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2702 bnx2x_dcbx_init(bp, false);
2703
55c11941
MS
2704 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2705
9f6c9258
DK
2706 return 0;
2707
619c5cb6 2708#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2709load_error3:
ad5afc89
AE
2710 if (IS_PF(bp)) {
2711 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2712
ad5afc89
AE
2713 /* Clean queueable objects */
2714 bnx2x_squeeze_objects(bp);
2715 }
619c5cb6 2716
9f6c9258
DK
2717 /* Free SKBs, SGEs, TPA pool and driver internals */
2718 bnx2x_free_skbs(bp);
ec6ba945 2719 for_each_rx_queue(bp, i)
9f6c9258 2720 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2721
9f6c9258 2722 /* Release IRQs */
d6214d7a
DK
2723 bnx2x_free_irq(bp);
2724load_error2:
ad5afc89 2725 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2726 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2727 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2728 }
2729
2730 bp->port.pmf = 0;
9f6c9258
DK
2731load_error1:
2732 bnx2x_napi_disable(bp);
ad5afc89 2733
889b9af3 2734 /* clear pf_load status, as it was already set */
ad5afc89
AE
2735 if (IS_PF(bp))
2736 bnx2x_clear_pf_load(bp);
d6214d7a 2737load_error0:
ad5afc89
AE
2738 bnx2x_free_fp_mem(bp);
2739 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2740 bnx2x_free_mem(bp);
2741
2742 return rc;
619c5cb6 2743#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2744}
2745
ad5afc89
AE
2746static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2747{
2748 u8 rc = 0, cos, i;
2749
2750 /* Wait until tx fastpath tasks complete */
2751 for_each_tx_queue(bp, i) {
2752 struct bnx2x_fastpath *fp = &bp->fp[i];
2753
2754 for_each_cos_in_tx_queue(fp, cos)
2755 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2756 if (rc)
2757 return rc;
2758 }
2759 return 0;
2760}
2761
9f6c9258 2762/* must be called with rtnl_lock */
5d07d868 2763int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2764{
2765 int i;
c9ee9206
VZ
2766 bool global = false;
2767
55c11941
MS
2768 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2769
9ce392d4 2770 /* mark driver is unloaded in shmem2 */
ad5afc89 2771 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2772 u32 val;
2773 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2774 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2775 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2776 }
2777
ad5afc89
AE
2778 if (IS_PF(bp) &&
2779 (bp->state == BNX2X_STATE_CLOSED ||
2780 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2781 /* We can get here if the driver has been unloaded
2782 * during parity error recovery and is either waiting for a
2783 * leader to complete or for other functions to unload and
2784 * then ifdown has been issued. In this case we want to
2785 * unload and let other functions to complete a recovery
2786 * process.
2787 */
9f6c9258
DK
2788 bp->recovery_state = BNX2X_RECOVERY_DONE;
2789 bp->is_leader = 0;
c9ee9206
VZ
2790 bnx2x_release_leader_lock(bp);
2791 smp_mb();
2792
51c1a580
MS
2793 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2794 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2795 return -EINVAL;
2796 }
2797
87b7ba3d
VZ
2798 /*
2799 * It's important to set the bp->state to a value different from
2800 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2801 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2802 */
2803 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2804 smp_mb();
2805
55c11941
MS
2806 if (CNIC_LOADED(bp))
2807 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2808
9505ee37
VZ
2809 /* Stop Tx */
2810 bnx2x_tx_disable(bp);
65565884 2811 netdev_reset_tc(bp->dev);
9505ee37 2812
9f6c9258 2813 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2814
9f6c9258 2815 del_timer_sync(&bp->timer);
f85582f8 2816
ad5afc89
AE
2817 if (IS_PF(bp)) {
2818 /* Set ALWAYS_ALIVE bit in shmem */
2819 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2820 bnx2x_drv_pulse(bp);
2821 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2822 bnx2x_save_statistics(bp);
2823 }
9f6c9258 2824
ad5afc89
AE
2825 /* wait till consumers catch up with producers in all queues */
2826 bnx2x_drain_tx_queues(bp);
9f6c9258 2827
9b176b6b
AE
2828 /* if VF, indicate to PF that this function is going down (PF will delete sp
2829 * elements and clear initializations)
2830 */
2831 if (IS_VF(bp))
2832 bnx2x_vfpf_close_vf(bp);
2833 else if (unload_mode != UNLOAD_RECOVERY)
2834 /* if this is a normal/close unload, need to clean up the chip */
5d07d868 2835 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2836 else {
c9ee9206
VZ
2837 /* Send the UNLOAD_REQUEST to the MCP */
2838 bnx2x_send_unload_req(bp, unload_mode);
2839
2840 /*
2841 * Prevent transactions to host from the functions on the
2842 * engine that doesn't reset global blocks in case of global
2843 * attention once global blocks are reset and gates are opened
2844 * (the engine whose leader will perform the recovery
2845 * last).
2846 */
2847 if (!CHIP_IS_E1x(bp))
2848 bnx2x_pf_disable(bp);
2849
2850 /* Disable HW interrupts, NAPI */
523224a3 2851 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2852 /* Delete all NAPI objects */
2853 bnx2x_del_all_napi(bp);
55c11941
MS
2854 if (CNIC_LOADED(bp))
2855 bnx2x_del_all_napi_cnic(bp);
523224a3 2856 /* Release IRQs */
d6214d7a 2857 bnx2x_free_irq(bp);
c9ee9206
VZ
2858
2859 /* Report UNLOAD_DONE to MCP */
5d07d868 2860 bnx2x_send_unload_done(bp, false);
523224a3 2861 }
9f6c9258 2862
619c5cb6
VZ
2863 /*
2864 * At this stage no more interrupts will arrive so we may safely clean
2865 * the queueable objects here in case they failed to get cleaned so far.
2866 */
ad5afc89
AE
2867 if (IS_PF(bp))
2868 bnx2x_squeeze_objects(bp);
619c5cb6 2869
79616895
VZ
2870 /* There should be no more pending SP commands at this stage */
2871 bp->sp_state = 0;
2872
9f6c9258
DK
2873 bp->port.pmf = 0;
2874
2875 /* Free SKBs, SGEs, TPA pool and driver internals */
2876 bnx2x_free_skbs(bp);
55c11941
MS
2877 if (CNIC_LOADED(bp))
2878 bnx2x_free_skbs_cnic(bp);
ec6ba945 2879 for_each_rx_queue(bp, i)
9f6c9258 2880 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2881
ad5afc89
AE
2882 bnx2x_free_fp_mem(bp);
2883 if (CNIC_LOADED(bp))
55c11941 2884 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2885
ad5afc89
AE
2886 if (IS_PF(bp)) {
2887 bnx2x_free_mem(bp);
2888 if (CNIC_LOADED(bp))
2889 bnx2x_free_mem_cnic(bp);
2890 }
9f6c9258 2891 bp->state = BNX2X_STATE_CLOSED;
55c11941 2892 bp->cnic_loaded = false;
9f6c9258 2893
c9ee9206
VZ
2894 /* Check if there are pending parity attentions. If there are - set
2895 * RECOVERY_IN_PROGRESS.
2896 */
ad5afc89 2897 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2898 bnx2x_set_reset_in_progress(bp);
2899
2900 /* Set RESET_IS_GLOBAL if needed */
2901 if (global)
2902 bnx2x_set_reset_global(bp);
2903 }
2904
2905
9f6c9258
DK
2906 /* The last driver must disable a "close the gate" if there is no
2907 * parity attention or "process kill" pending.
2908 */
ad5afc89
AE
2909 if (IS_PF(bp) &&
2910 !bnx2x_clear_pf_load(bp) &&
2911 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2912 bnx2x_disable_close_the_gate(bp);
2913
55c11941
MS
2914 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2915
9f6c9258
DK
2916 return 0;
2917}
f85582f8 2918
9f6c9258
DK
2919int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2920{
2921 u16 pmcsr;
2922
adf5f6a1
DK
2923 /* If there is no power capability, silently succeed */
2924 if (!bp->pm_cap) {
51c1a580 2925 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2926 return 0;
2927 }
2928
9f6c9258
DK
2929 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2930
2931 switch (state) {
2932 case PCI_D0:
2933 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2934 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2935 PCI_PM_CTRL_PME_STATUS));
2936
2937 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2938 /* delay required during transition out of D3hot */
2939 msleep(20);
2940 break;
2941
2942 case PCI_D3hot:
2943 /* If there are other clients above, don't
2944 shut down the power */
2945 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2946 return 0;
2947 /* Don't shut down the power for emulation and FPGA */
2948 if (CHIP_REV_IS_SLOW(bp))
2949 return 0;
2950
2951 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2952 pmcsr |= 3;
2953
2954 if (bp->wol)
2955 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2956
2957 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2958 pmcsr);
2959
2960 /* No more memory access after this point until
2961 * device is brought back to D0.
2962 */
2963 break;
2964
2965 default:
51c1a580 2966 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
2967 return -EINVAL;
2968 }
2969 return 0;
2970}
2971
9f6c9258
DK
2972/*
2973 * net_device service functions
2974 */
d6214d7a 2975int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2976{
2977 int work_done = 0;
6383c0b3 2978 u8 cos;
9f6c9258
DK
2979 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2980 napi);
2981 struct bnx2x *bp = fp->bp;
2982
2983 while (1) {
2984#ifdef BNX2X_STOP_ON_ERROR
2985 if (unlikely(bp->panic)) {
2986 napi_complete(napi);
2987 return 0;
2988 }
2989#endif
2990
6383c0b3 2991 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
2992 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2993 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 2994
9f6c9258
DK
2995
2996 if (bnx2x_has_rx_work(fp)) {
2997 work_done += bnx2x_rx_int(fp, budget - work_done);
2998
2999 /* must not complete if we consumed full budget */
3000 if (work_done >= budget)
3001 break;
3002 }
3003
3004 /* Fall out from the NAPI loop if needed */
3005 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3006
ec6ba945
VZ
3007 /* No need to update SB for FCoE L2 ring as long as
3008 * it's connected to the default SB and the SB
3009 * has been updated when NAPI was scheduled.
3010 */
3011 if (IS_FCOE_FP(fp)) {
3012 napi_complete(napi);
3013 break;
3014 }
9f6c9258 3015 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3016 /* bnx2x_has_rx_work() reads the status block,
3017 * thus we need to ensure that status block indices
3018 * have been actually read (bnx2x_update_fpsb_idx)
3019 * prior to this check (bnx2x_has_rx_work) so that
3020 * we won't write the "newer" value of the status block
3021 * to IGU (if there was a DMA right after
3022 * bnx2x_has_rx_work and if there is no rmb, the memory
3023 * reading (bnx2x_update_fpsb_idx) may be postponed
3024 * to right before bnx2x_ack_sb). In this case there
3025 * will never be another interrupt until there is
3026 * another update of the status block, while there
3027 * is still unhandled work.
3028 */
9f6c9258
DK
3029 rmb();
3030
3031 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3032 napi_complete(napi);
3033 /* Re-enable interrupts */
51c1a580 3034 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3035 "Update index to %d\n", fp->fp_hc_idx);
3036 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3037 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3038 IGU_INT_ENABLE, 1);
3039 break;
3040 }
3041 }
3042 }
3043
3044 return work_done;
3045}
3046
9f6c9258
DK
3047/* we split the first BD into headers and data BDs
3048 * to ease the pain of our fellow microcode engineers;
3049 * we use one mapping for both BDs
9f6c9258
DK
3050 */
3051static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 3052 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
3053 struct sw_tx_bd *tx_buf,
3054 struct eth_tx_start_bd **tx_bd, u16 hlen,
3055 u16 bd_prod, int nbd)
3056{
3057 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3058 struct eth_tx_bd *d_tx_bd;
3059 dma_addr_t mapping;
3060 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3061
3062 /* first fix first BD */
3063 h_tx_bd->nbd = cpu_to_le16(nbd);
3064 h_tx_bd->nbytes = cpu_to_le16(hlen);
3065
51c1a580
MS
3066 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3067 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
3068
3069 /* now get a new data BD
3070 * (after the pbd) and fill it */
3071 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3072 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3073
3074 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3075 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3076
3077 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3078 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3079 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3080
3081 /* this marks the BD as one that has no individual mapping */
3082 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3083
3084 DP(NETIF_MSG_TX_QUEUED,
3085 "TSO split data size is %d (%x:%x)\n",
3086 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3087
3088 /* update tx_bd */
3089 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3090
3091 return bd_prod;
3092}
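/* Illustrative example (sizes assumed): for a TSO packet whose first BD
 * covers 1400 bytes of which hlen = 66 bytes are headers, the split above
 * leaves the start BD with nbytes = 66 and creates a data BD with
 * nbytes = 1400 - 66 = 1334, both referencing the same DMA mapping.
 */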
3093
3094static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3095{
3096 if (fix > 0)
3097 csum = (u16) ~csum_fold(csum_sub(csum,
3098 csum_partial(t_header - fix, fix, 0)));
3099
3100 else if (fix < 0)
3101 csum = (u16) ~csum_fold(csum_add(csum,
3102 csum_partial(t_header, -fix, 0)));
3103
3104 return swab16(csum);
3105}
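/* Descriptive note: bnx2x_csum_fix() above compensates a partial checksum
 * when the checksum start used by the stack is 'fix' bytes away from the
 * transport header - the extra (or missing) bytes are folded out of (or
 * into) the sum, and the result is byte-swapped for the parse BD.
 */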
3106
3107static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3108{
3109 u32 rc;
3110
3111 if (skb->ip_summed != CHECKSUM_PARTIAL)
3112 rc = XMIT_PLAIN;
3113
3114 else {
d0d9d8ef 3115 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
3116 rc = XMIT_CSUM_V6;
3117 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3118 rc |= XMIT_CSUM_TCP;
3119
3120 } else {
3121 rc = XMIT_CSUM_V4;
3122 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3123 rc |= XMIT_CSUM_TCP;
3124 }
3125 }
3126
5892b9e9
VZ
3127 if (skb_is_gso_v6(skb))
3128 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3129 else if (skb_is_gso(skb))
3130 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
3131
3132 return rc;
3133}
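/* Quick reference for the function above: a non-offloaded skb yields
 * XMIT_PLAIN; a checksummed IPv4 TCP skb yields XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 * a GSO skb additionally sets XMIT_GSO_V4 or XMIT_GSO_V6, and GSO always
 * implies the matching checksum flags.
 */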
3134
3135#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3136/* check if packet requires linearization (packet is too fragmented)
3137 no need to check fragmentation if page size > 8K (there will be no
3138 violation to FW restrictions) */
3139static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3140 u32 xmit_type)
3141{
3142 int to_copy = 0;
3143 int hlen = 0;
3144 int first_bd_sz = 0;
3145
3146 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3147 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3148
3149 if (xmit_type & XMIT_GSO) {
3150 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3151 /* Check if LSO packet needs to be copied:
3152 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3153 int wnd_size = MAX_FETCH_BD - 3;
3154 /* Number of windows to check */
3155 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3156 int wnd_idx = 0;
3157 int frag_idx = 0;
3158 u32 wnd_sum = 0;
3159
3160 /* Headers length */
3161 hlen = (int)(skb_transport_header(skb) - skb->data) +
3162 tcp_hdrlen(skb);
3163
3164 /* Amount of data (w/o headers) on linear part of SKB*/
3165 first_bd_sz = skb_headlen(skb) - hlen;
3166
3167 wnd_sum = first_bd_sz;
3168
3169 /* Calculate the first sum - it's special */
3170 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3171 wnd_sum +=
9e903e08 3172 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3173
3174 /* If there was data in the linear part of the skb - check it */
3175 if (first_bd_sz > 0) {
3176 if (unlikely(wnd_sum < lso_mss)) {
3177 to_copy = 1;
3178 goto exit_lbl;
3179 }
3180
3181 wnd_sum -= first_bd_sz;
3182 }
3183
3184 /* Others are easier: run through the frag list and
3185 check all windows */
3186 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3187 wnd_sum +=
9e903e08 3188 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3189
3190 if (unlikely(wnd_sum < lso_mss)) {
3191 to_copy = 1;
3192 break;
3193 }
3194 wnd_sum -=
9e903e08 3195 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3196 }
3197 } else {
3198 /* a non-LSO packet that is too fragmented should always
3199 be linearized */
3200 to_copy = 1;
3201 }
3202 }
3203
3204exit_lbl:
3205 if (unlikely(to_copy))
3206 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3207 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3208 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3209 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3210
3211 return to_copy;
3212}
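/* Illustrative note (MAX_FETCH_BD value assumed, not taken from the
 * headers): if MAX_FETCH_BD were 13, the window size above would be 10 BDs;
 * the loop slides that window across the frag list and requests
 * linearization whenever any window carries less than one MSS of payload.
 */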
3213#endif
3214
2297a2da
VZ
3215static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3216 u32 xmit_type)
f2e0899f 3217{
2297a2da
VZ
3218 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3219 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3220 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
3221 if ((xmit_type & XMIT_GSO_V6) &&
3222 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 3223 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3224}
3225
3226/**
e8920674 3227 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3228 *
e8920674
DK
3229 * @skb: packet skb
3230 * @pbd: parse BD
3231 * @xmit_type: xmit flags
f2e0899f
DK
3232 */
3233static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3234 struct eth_tx_parse_bd_e1x *pbd,
3235 u32 xmit_type)
3236{
3237 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3238 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3239 pbd->tcp_flags = pbd_tcp_flags(skb);
3240
3241 if (xmit_type & XMIT_GSO_V4) {
3242 pbd->ip_id = swab16(ip_hdr(skb)->id);
3243 pbd->tcp_pseudo_csum =
3244 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3245 ip_hdr(skb)->daddr,
3246 0, IPPROTO_TCP, 0));
3247
3248 } else
3249 pbd->tcp_pseudo_csum =
3250 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3251 &ipv6_hdr(skb)->daddr,
3252 0, IPPROTO_TCP, 0));
3253
3254 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3255}
f85582f8 3256
f2e0899f 3257/**
e8920674 3258 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3259 *
e8920674
DK
3260 * @bp: driver handle
3261 * @skb: packet skb
3262 * @parsing_data: data to be updated
3263 * @xmit_type: xmit flags
f2e0899f 3264 *
e8920674 3265 * 57712 related
f2e0899f
DK
3266 */
3267static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 3268 u32 *parsing_data, u32 xmit_type)
f2e0899f 3269{
e39aece7
VZ
3270 *parsing_data |=
3271 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3272 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3273 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 3274
e39aece7
VZ
3275 if (xmit_type & XMIT_CSUM_TCP) {
3276 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3277 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3278 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3279
e39aece7
VZ
3280 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3281 } else
3282 /* We support checksum offload for TCP and UDP only.
3283 * No need to pass the UDP header length - it's a constant.
3284 */
3285 return skb_transport_header(skb) +
3286 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3287}
3288
93ef5c02
DK
3289static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3290 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3291{
93ef5c02
DK
3292 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3293
3294 if (xmit_type & XMIT_CSUM_V4)
3295 tx_start_bd->bd_flags.as_bitfield |=
3296 ETH_TX_BD_FLAGS_IP_CSUM;
3297 else
3298 tx_start_bd->bd_flags.as_bitfield |=
3299 ETH_TX_BD_FLAGS_IPV6;
3300
3301 if (!(xmit_type & XMIT_CSUM_TCP))
3302 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3303}
3304
f2e0899f 3305/**
e8920674 3306 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3307 *
e8920674
DK
3308 * @bp: driver handle
3309 * @skb: packet skb
3310 * @pbd: parse BD to be updated
3311 * @xmit_type: xmit flags
f2e0899f
DK
3312 */
3313static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3314 struct eth_tx_parse_bd_e1x *pbd,
3315 u32 xmit_type)
3316{
e39aece7 3317 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3318
3319 /* for now NS flag is not used in Linux */
3320 pbd->global_data =
3321 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3322 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3323
3324 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3325 skb_network_header(skb)) >> 1;
f2e0899f 3326
e39aece7
VZ
3327 hlen += pbd->ip_hlen_w;
3328
3329 /* We support checksum offload for TCP and UDP only */
3330 if (xmit_type & XMIT_CSUM_TCP)
3331 hlen += tcp_hdrlen(skb) / 2;
3332 else
3333 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3334
3335 pbd->total_hlen_w = cpu_to_le16(hlen);
3336 hlen = hlen*2;
3337
3338 if (xmit_type & XMIT_CSUM_TCP) {
3339 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3340
3341 } else {
3342 s8 fix = SKB_CS_OFF(skb); /* signed! */
3343
3344 DP(NETIF_MSG_TX_QUEUED,
3345 "hlen %d fix %d csum before fix %x\n",
3346 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3347
3348 /* HW bug: fixup the CSUM */
3349 pbd->tcp_pseudo_csum =
3350 bnx2x_csum_fix(skb_transport_header(skb),
3351 SKB_CS(skb), fix);
3352
3353 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3354 pbd->tcp_pseudo_csum);
3355 }
3356
3357 return hlen;
3358}
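/* Illustrative example (header sizes assumed): for an untagged IPv4 TCP
 * packet with no IP or TCP options, hlen accumulates 14/2 + 20/2 + 20/2 = 27
 * words, so pbd->total_hlen_w = 27 and the function returns 54 bytes.
 */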
f85582f8 3359
9f6c9258
DK
3360/* called with netif_tx_lock
3361 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3362 * netif_wake_queue()
3363 */
3364netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3365{
3366 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3367
9f6c9258 3368 struct netdev_queue *txq;
6383c0b3 3369 struct bnx2x_fp_txdata *txdata;
9f6c9258 3370 struct sw_tx_bd *tx_buf;
619c5cb6 3371 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3372 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3373 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3374 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 3375 u32 pbd_e2_parsing_data = 0;
9f6c9258 3376 u16 pkt_prod, bd_prod;
65565884 3377 int nbd, txq_index;
9f6c9258
DK
3378 dma_addr_t mapping;
3379 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3380 int i;
3381 u8 hlen = 0;
3382 __le16 pkt_size = 0;
3383 struct ethhdr *eth;
3384 u8 mac_type = UNICAST_ADDRESS;
3385
3386#ifdef BNX2X_STOP_ON_ERROR
3387 if (unlikely(bp->panic))
3388 return NETDEV_TX_BUSY;
3389#endif
3390
6383c0b3
AE
3391 txq_index = skb_get_queue_mapping(skb);
3392 txq = netdev_get_tx_queue(dev, txq_index);
3393
55c11941 3394 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3395
65565884 3396 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3397
3398 /* enable this debug print to view the transmission queue being used
51c1a580 3399 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3400 txq_index, fp_index, txdata_index); */
9f6c9258 3401
6383c0b3 3402 /* enable this debug print to view the transmission details
51c1a580
MS
3403 DP(NETIF_MSG_TX_QUEUED,
3404 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3405 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3406
6383c0b3 3407 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3408 skb_shinfo(skb)->nr_frags +
3409 BDS_PER_TX_PKT +
3410 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3411 /* Handle special storage cases separately */
c96bdc0c
DK
3412 if (txdata->tx_ring_size == 0) {
3413 struct bnx2x_eth_q_stats *q_stats =
3414 bnx2x_fp_qstats(bp, txdata->parent_fp);
3415 q_stats->driver_filtered_tx_pkt++;
3416 dev_kfree_skb(skb);
3417 return NETDEV_TX_OK;
3418 }
2384d6aa
DK
3419 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3420 netif_tx_stop_queue(txq);
c96bdc0c 3421 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3422
9f6c9258
DK
3423 return NETDEV_TX_BUSY;
3424 }
3425
51c1a580
MS
3426 DP(NETIF_MSG_TX_QUEUED,
3427 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 3428 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
3429 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3430
3431 eth = (struct ethhdr *)skb->data;
3432
3433 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3434 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3435 if (is_broadcast_ether_addr(eth->h_dest))
3436 mac_type = BROADCAST_ADDRESS;
3437 else
3438 mac_type = MULTICAST_ADDRESS;
3439 }
3440
3441#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3442 /* First, check if we need to linearize the skb (due to FW
3443 restrictions). No need to check fragmentation if page size > 8K
3444 (there will be no violation to FW restrictions) */
3445 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3446 /* Statistics of linearization */
3447 bp->lin_cnt++;
3448 if (skb_linearize(skb) != 0) {
51c1a580
MS
3449 DP(NETIF_MSG_TX_QUEUED,
3450 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3451 dev_kfree_skb_any(skb);
3452 return NETDEV_TX_OK;
3453 }
3454 }
3455#endif
619c5cb6
VZ
3456 /* Map skb linear data for DMA */
3457 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3458 skb_headlen(skb), DMA_TO_DEVICE);
3459 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3460 DP(NETIF_MSG_TX_QUEUED,
3461 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3462 dev_kfree_skb_any(skb);
3463 return NETDEV_TX_OK;
3464 }
9f6c9258
DK
3465 /*
3466 Please read carefully. First we use one BD which we mark as start,
3467 then we have a parsing info BD (used for TSO or xsum),
3468 and only then we have the rest of the TSO BDs.
3469 (don't forget to mark the last one as last,
3470 and to unmap only AFTER you write to the BD ...)
3471 And above all, all PBD sizes are in words - NOT DWORDS!
3472 */
3473
619c5cb6
VZ
3474 /* get current pkt produced now - advance it just before sending packet
3475 * since mapping of pages may fail and cause packet to be dropped
3476 */
6383c0b3
AE
3477 pkt_prod = txdata->tx_pkt_prod;
3478 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3479
619c5cb6
VZ
3480 /* get a tx_buf and first BD
3481 * tx_start_bd may be changed during SPLIT,
3482 * but first_bd will always stay first
3483 */
6383c0b3
AE
3484 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3485 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3486 first_bd = tx_start_bd;
9f6c9258
DK
3487
3488 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
96bed4b9
YM
3489 SET_FLAG(tx_start_bd->general_data,
3490 ETH_TX_START_BD_PARSE_NBDS,
3491 0);
f85582f8 3492
9f6c9258 3493 /* header nbd */
f85582f8 3494 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
3495
3496 /* remember the first BD of the packet */
6383c0b3 3497 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3498 tx_buf->skb = skb;
3499 tx_buf->flags = 0;
3500
3501 DP(NETIF_MSG_TX_QUEUED,
3502 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3503 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3504
eab6d18d 3505 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3506 tx_start_bd->vlan_or_ethertype =
3507 cpu_to_le16(vlan_tx_tag_get(skb));
3508 tx_start_bd->bd_flags.as_bitfield |=
3509 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3510 } else {
3511 /* when transmitting in a vf, start bd must hold the ethertype
3512 * for fw to enforce it
3513 */
3514 if (IS_VF(bp)) {
3515 tx_start_bd->vlan_or_ethertype =
3516 cpu_to_le16(ntohs(eth->h_proto));
3517 } else {
3518 /* used by FW for packet accounting */
3519 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3520 }
3521 }
9f6c9258
DK
3522
3523 /* turn on parsing and get a BD */
3524 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3525
93ef5c02
DK
3526 if (xmit_type & XMIT_CSUM)
3527 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3528
619c5cb6 3529 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3530 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
3531 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3532 /* Set PBD in checksum offload case */
3533 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
3534 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3535 &pbd_e2_parsing_data,
3536 xmit_type);
dc1ba591
AE
3537
3538 if (IS_MF_SI(bp) || IS_VF(bp)) {
3539 /* fill in the MAC addresses in the PBD - for local
619c5cb6
VZ
3540 * switching
3541 */
3542 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3543 &pbd_e2->src_mac_addr_mid,
3544 &pbd_e2->src_mac_addr_lo,
3545 eth->h_source);
3546 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3547 &pbd_e2->dst_mac_addr_mid,
3548 &pbd_e2->dst_mac_addr_lo,
3549 eth->h_dest);
3550 }
96bed4b9
YM
3551
3552 SET_FLAG(pbd_e2_parsing_data,
3553 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3554 } else {
96bed4b9 3555 u16 global_data = 0;
6383c0b3 3556 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3557 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3558 /* Set PBD in checksum offload case */
3559 if (xmit_type & XMIT_CSUM)
3560 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3561
96bed4b9
YM
3562 SET_FLAG(global_data,
3563 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3564 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3565 }
3566
f85582f8 3567 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3568 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3569 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 3570 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
3571 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3572 pkt_size = tx_start_bd->nbytes;
3573
51c1a580
MS
3574 DP(NETIF_MSG_TX_QUEUED,
3575 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
3576 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3577 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3578 tx_start_bd->bd_flags.as_bitfield,
3579 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3580
3581 if (xmit_type & XMIT_GSO) {
3582
3583 DP(NETIF_MSG_TX_QUEUED,
3584 "TSO packet len %d hlen %d total len %d tso size %d\n",
3585 skb->len, hlen, skb_headlen(skb),
3586 skb_shinfo(skb)->gso_size);
3587
3588 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3589
3590 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
3591 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3592 &tx_start_bd, hlen,
3593 bd_prod, ++nbd);
619c5cb6 3594 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3595 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3596 xmit_type);
f2e0899f
DK
3597 else
3598 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3599 }
2297a2da
VZ
3600
3601 /* Set the PBD's parsing_data field if not zero
3602 * (for the chips newer than 57711).
3603 */
3604 if (pbd_e2_parsing_data)
3605 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3606
9f6c9258
DK
3607 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3608
f85582f8 3609 /* Handle fragmented skb */
9f6c9258
DK
3610 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3611 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3612
9e903e08
ED
3613 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3614 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3615 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3616 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3617
51c1a580
MS
3618 DP(NETIF_MSG_TX_QUEUED,
3619 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3620
 3621 * we need to unmap all buffers already mapped
 3622 * for this SKB;
 3623 * first_bd->nbd needs to be properly updated
 3624 * before the call to bnx2x_free_tx_pkt
3625 */
3626 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3627 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3628 TX_BD(txdata->tx_pkt_prod),
3629 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3630 return NETDEV_TX_OK;
3631 }
3632
9f6c9258 3633 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3634 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3635 if (total_pkt_bd == NULL)
6383c0b3 3636 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3637
9f6c9258
DK
3638 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3639 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3640 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3641 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3642 nbd++;
9f6c9258
DK
3643
3644 DP(NETIF_MSG_TX_QUEUED,
3645 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3646 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3647 le16_to_cpu(tx_data_bd->nbytes));
3648 }
3649
3650 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3651
619c5cb6
VZ
3652 /* update with actual num BDs */
3653 first_bd->nbd = cpu_to_le16(nbd);
3654
9f6c9258
DK
3655 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3656
3657 /* now send a tx doorbell, counting the next BD
3658 * if the packet contains or ends with it
3659 */
3660 if (TX_BD_POFF(bd_prod) < nbd)
3661 nbd++;
3662
619c5cb6
VZ
3663 /* total_pkt_bytes should be set on the first data BD if
3664 * it's not an LSO packet and there is more than one
3665 * data BD. In this case pkt_size is limited by an MTU value.
3666 * However we prefer to set it for an LSO packet (while we don't
 3667 * have to) in order to save some CPU cycles in a non-LSO
 3668 * case, when we care much more about them.
3669 */
9f6c9258
DK
3670 if (total_pkt_bd != NULL)
3671 total_pkt_bd->total_pkt_bytes = pkt_size;
3672
523224a3 3673 if (pbd_e1x)
9f6c9258 3674 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3675 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3676 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3677 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3678 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3679 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3680 if (pbd_e2)
3681 DP(NETIF_MSG_TX_QUEUED,
3682 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3683 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3684 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3685 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3686 pbd_e2->parsing_data);
9f6c9258
DK
3687 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3688
2df1a70a
TH
3689 netdev_tx_sent_queue(txq, skb->len);
3690
8373c57d
WB
3691 skb_tx_timestamp(skb);
3692
6383c0b3 3693 txdata->tx_pkt_prod++;
9f6c9258
DK
3694 /*
3695 * Make sure that the BD data is updated before updating the producer
3696 * since FW might read the BD right after the producer is updated.
3697 * This is only applicable for weak-ordered memory model archs such
 3698 * as IA-64. The following barrier is also mandatory since FW
 3699 * assumes packets must have BDs.
3700 */
3701 wmb();
3702
6383c0b3 3703 txdata->tx_db.data.prod += nbd;
9f6c9258 3704 barrier();
f85582f8 3705
6383c0b3 3706 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3707
3708 mmiowb();
3709
6383c0b3 3710 txdata->tx_bd_prod += nbd;
9f6c9258 3711
7df2dc6b 3712 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3713 netif_tx_stop_queue(txq);
3714
3715 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3716 * ordering of set_bit() in netif_tx_stop_queue() and read of
3717 * fp->bd_tx_cons */
3718 smp_mb();
3719
15192a8c 3720 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3721 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3722 netif_tx_wake_queue(txq);
3723 }
6383c0b3 3724 txdata->tx_pkt++;
9f6c9258
DK
3725
3726 return NETDEV_TX_OK;
3727}
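
/* Illustrative sketch, not part of the driver logic: the
 * netif_tx_stop_queue()/smp_mb() sequence near the end of bnx2x_start_xmit()
 * is paired with the completion side in bnx2x_tx_int() (earlier in this
 * file), which - roughly, with the exact conditions treated as an
 * assumption - does the mirror image:
 *
 *	txdata->tx_bd_cons = bd_cons;
 *	smp_mb();
 *	if (unlikely(netif_tx_queue_stopped(txq)) &&
 *	    bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
 *		netif_tx_wake_queue(txq);
 *
 * The barriers on both sides keep the queue from staying stopped forever
 * when producer and consumer race on the tx_bd_prod/tx_bd_cons indices.
 */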
f85582f8 3728
6383c0b3
AE
3729/**
3730 * bnx2x_setup_tc - routine to configure net_device for multi tc
3731 *
3732 * @netdev: net device to configure
3733 * @tc: number of traffic classes to enable
3734 *
3735 * callback connected to the ndo_setup_tc function pointer
3736 */
3737int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3738{
3739 int cos, prio, count, offset;
3740 struct bnx2x *bp = netdev_priv(dev);
3741
3742 /* setup tc must be called under rtnl lock */
3743 ASSERT_RTNL();
3744
3745 /* no traffic classes requested. aborting */
3746 if (!num_tc) {
3747 netdev_reset_tc(dev);
3748 return 0;
3749 }
3750
3751 /* requested to support too many traffic classes */
3752 if (num_tc > bp->max_cos) {
51c1a580
MS
3753 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3754 num_tc, bp->max_cos);
6383c0b3
AE
3755 return -EINVAL;
3756 }
3757
3758 /* declare amount of supported traffic classes */
3759 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3760 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3761 return -EINVAL;
3762 }
3763
3764 /* configure priority to traffic class mapping */
3765 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3766 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3767 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3768 "mapping priority %d to tc %d\n",
6383c0b3
AE
3769 prio, bp->prio_to_cos[prio]);
3770 }
3771
3772
 3773 /* Use this configuration to differentiate tc0 from other COSes.
 3774 This can be used for ETS or PFC, and saves the effort of setting
 3775 up a multi-class queue disc or negotiating DCBX with a switch
3776 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3777 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3778 for (prio = 1; prio < 16; prio++) {
3779 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3780 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3781 } */
3782
3783 /* configure traffic class to transmission queue mapping */
3784 for (cos = 0; cos < bp->max_cos; cos++) {
3785 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3786 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3787 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3788 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3789 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3790 cos, offset, count);
3791 }
3792
3793 return 0;
3794}
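
/* Illustrative sketch, assuming the usual hook-up in bnx2x_main.c (not shown
 * in this file): bnx2x_setup_tc() is exposed through the driver's
 * net_device_ops, roughly as
 *
 *	static const struct net_device_ops bnx2x_netdev_ops = {
 *		...
 *		.ndo_setup_tc	= bnx2x_setup_tc,
 *		...
 *	};
 *
 * so that configuring mqprio from user space, e.g.
 * "tc qdisc add dev ethX root mqprio num_tc 3 ...", ends up calling
 * bnx2x_setup_tc(dev, 3) under rtnl_lock.
 */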
3795
abc5a021
AE
3796/* New mac for VF. Consider these cases:
3797 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3798 * supply at acquire.
3799 * 2. VF has already been acquired but has not yet initialized - store in local
3800 * bulletin board. mac will be posted on VF bulletin board after VF init. VF
3801 * will configure this mac when it is ready.
3802 * 3. VF has already initialized but has not yet setup a queue - post the new
3803 * mac on VF's bulletin board right now. VF will configure this mac when it
3804 * is ready.
3805 * 4. VF has already set a queue - delete any macs already configured for this
3806 * queue and manually config the new mac.
3807 * In any event, once this function has been called refuse any attempts by the
3808 * VF to configure any mac for itself except for this mac. In case of a race
3809 * where the VF fails to see the new post on its bulletin board before sending a
3810 * mac configuration request, the PF will simply fail the request and VF can try
3811 * again after consulting its bulletin board
3812 */
3813int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3814{
3815 struct bnx2x *bp = netdev_priv(dev);
3816 int rc, q_logical_state, vfidx = queue;
3817 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3818 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3819
3820 /* if SRIOV is disabled there is nothing to do (and somewhere, someone
3821 * has erred).
3822 */
3823 if (!IS_SRIOV(bp)) {
3824 BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
3825 return -EINVAL;
3826 }
3827
3828 if (!is_valid_ether_addr(mac)) {
3829 BNX2X_ERR("mac address invalid\n");
3830 return -EINVAL;
3831 }
3832
3833 /* update PF's copy of the VF's bulletin. will no longer accept mac
3834 * configuration requests from vf unless match this mac
3835 */
3836 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3837 memcpy(bulletin->mac, mac, ETH_ALEN);
3838
3839 /* Post update on VF's bulletin board */
3840 rc = bnx2x_post_vf_bulletin(bp, vfidx);
3841 if (rc) {
3842 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3843 return rc;
3844 }
3845
3846 /* is vf initialized and queue set up? */
3847 q_logical_state =
3848 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3849 if (vf->state == VF_ENABLED &&
3850 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3851 /* configure the mac in device on this vf's queue */
3852 unsigned long flags = 0;
3853 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3854
3855 /* must lock vfpf channel to protect against vf flows */
3856 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3857
3858 /* remove existing eth macs */
3859 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3860 if (rc) {
3861 BNX2X_ERR("failed to delete eth macs\n");
3862 return -EINVAL;
3863 }
3864
3865 /* remove existing uc list macs */
3866 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3867 if (rc) {
3868 BNX2X_ERR("failed to delete uc_list macs\n");
3869 return -EINVAL;
3870 }
3871
3872 /* configure the new mac to device */
3873 __set_bit(RAMROD_COMP_WAIT, &flags);
3874 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3875 BNX2X_ETH_MAC, &flags);
3876
3877 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3878 }
3879
3880 return rc;
3881}
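
/* Illustrative sketch, assuming the usual SR-IOV hook-up (the registration
 * site is not shown in this file): bnx2x_set_vf_mac() serves as the
 * .ndo_set_vf_mac callback,
 *
 *	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
 *
 * and is reached when the PF administrator runs something like
 * "ip link set dev ethX vf 0 mac 02:00:00:00:00:01".
 */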
3882
9f6c9258
DK
3883/* called with rtnl_lock */
3884int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3885{
3886 struct sockaddr *addr = p;
3887 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3888 int rc = 0;
9f6c9258 3889
51c1a580
MS
3890 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3891 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3892 return -EINVAL;
51c1a580 3893 }
614c76df 3894
a3348722
BW
3895 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3896 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3897 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3898 return -EINVAL;
51c1a580 3899 }
9f6c9258 3900
619c5cb6
VZ
3901 if (netif_running(dev)) {
3902 rc = bnx2x_set_eth_mac(bp, false);
3903 if (rc)
3904 return rc;
3905 }
3906
9f6c9258 3907 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3908
523224a3 3909 if (netif_running(dev))
619c5cb6 3910 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3911
619c5cb6 3912 return rc;
9f6c9258
DK
3913}
3914
b3b83c3f
DK
3915static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3916{
3917 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3918 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3919 u8 cos;
b3b83c3f
DK
3920
3921 /* Common */
55c11941 3922
b3b83c3f
DK
3923 if (IS_FCOE_IDX(fp_index)) {
3924 memset(sb, 0, sizeof(union host_hc_status_block));
3925 fp->status_blk_mapping = 0;
b3b83c3f 3926 } else {
b3b83c3f 3927 /* status blocks */
619c5cb6 3928 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3929 BNX2X_PCI_FREE(sb->e2_sb,
3930 bnx2x_fp(bp, fp_index,
3931 status_blk_mapping),
3932 sizeof(struct host_hc_status_block_e2));
3933 else
3934 BNX2X_PCI_FREE(sb->e1x_sb,
3935 bnx2x_fp(bp, fp_index,
3936 status_blk_mapping),
3937 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3938 }
55c11941 3939
b3b83c3f
DK
3940 /* Rx */
3941 if (!skip_rx_queue(bp, fp_index)) {
3942 bnx2x_free_rx_bds(fp);
3943
3944 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3945 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3946 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3947 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3948 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3949
3950 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3951 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3952 sizeof(struct eth_fast_path_rx_cqe) *
3953 NUM_RCQ_BD);
3954
3955 /* SGE ring */
3956 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3957 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3958 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3959 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3960 }
3961
3962 /* Tx */
3963 if (!skip_tx_queue(bp, fp_index)) {
3964 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3965 for_each_cos_in_tx_queue(fp, cos) {
65565884 3966 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3967
51c1a580 3968 DP(NETIF_MSG_IFDOWN,
94f05b0f 3969 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3970 fp_index, cos, txdata->cid);
3971
3972 BNX2X_FREE(txdata->tx_buf_ring);
3973 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3974 txdata->tx_desc_mapping,
3975 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3976 }
b3b83c3f
DK
3977 }
3978 /* end of fastpath */
3979}
3980
55c11941
MS
3981void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3982{
3983 int i;
3984 for_each_cnic_queue(bp, i)
3985 bnx2x_free_fp_mem_at(bp, i);
3986}
3987
b3b83c3f
DK
3988void bnx2x_free_fp_mem(struct bnx2x *bp)
3989{
3990 int i;
55c11941 3991 for_each_eth_queue(bp, i)
b3b83c3f
DK
3992 bnx2x_free_fp_mem_at(bp, i);
3993}
3994
1191cb83 3995static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3996{
3997 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3998 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3999 bnx2x_fp(bp, index, sb_index_values) =
4000 (__le16 *)status_blk.e2_sb->sb.index_values;
4001 bnx2x_fp(bp, index, sb_running_index) =
4002 (__le16 *)status_blk.e2_sb->sb.running_index;
4003 } else {
4004 bnx2x_fp(bp, index, sb_index_values) =
4005 (__le16 *)status_blk.e1x_sb->sb.index_values;
4006 bnx2x_fp(bp, index, sb_running_index) =
4007 (__le16 *)status_blk.e1x_sb->sb.running_index;
4008 }
4009}
4010
1191cb83
ED
4011/* Returns the number of actually allocated BDs */
4012static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4013 int rx_ring_size)
4014{
4015 struct bnx2x *bp = fp->bp;
4016 u16 ring_prod, cqe_ring_prod;
4017 int i, failure_cnt = 0;
4018
4019 fp->rx_comp_cons = 0;
4020 cqe_ring_prod = ring_prod = 0;
4021
 4022 /* This routine is called only during fp init so
4023 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4024 */
4025 for (i = 0; i < rx_ring_size; i++) {
4026 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4027 failure_cnt++;
4028 continue;
4029 }
4030 ring_prod = NEXT_RX_IDX(ring_prod);
4031 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4032 WARN_ON(ring_prod <= (i - failure_cnt));
4033 }
4034
4035 if (failure_cnt)
4036 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4037 i - failure_cnt, fp->index);
4038
4039 fp->rx_bd_prod = ring_prod;
4040 /* Limit the CQE producer by the CQE ring size */
4041 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4042 cqe_ring_prod);
4043 fp->rx_pkt = fp->rx_calls = 0;
4044
15192a8c 4045 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4046
4047 return i - failure_cnt;
4048}
4049
4050static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4051{
4052 int i;
4053
4054 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4055 struct eth_rx_cqe_next_page *nextpg;
4056
4057 nextpg = (struct eth_rx_cqe_next_page *)
4058 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4059 nextpg->addr_hi =
4060 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4061 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4062 nextpg->addr_lo =
4063 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4064 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4065 }
4066}
4067
b3b83c3f
DK
4068static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4069{
4070 union host_hc_status_block *sb;
4071 struct bnx2x_fastpath *fp = &bp->fp[index];
4072 int ring_size = 0;
6383c0b3 4073 u8 cos;
c2188952 4074 int rx_ring_size = 0;
b3b83c3f 4075
a3348722
BW
4076 if (!bp->rx_ring_size &&
4077 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4078 rx_ring_size = MIN_RX_SIZE_NONTPA;
4079 bp->rx_ring_size = rx_ring_size;
55c11941 4080 } else if (!bp->rx_ring_size) {
c2188952
VZ
4081 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4082
065f8b92
YM
4083 if (CHIP_IS_E3(bp)) {
4084 u32 cfg = SHMEM_RD(bp,
4085 dev_info.port_hw_config[BP_PORT(bp)].
4086 default_cfg);
4087
4088 /* Decrease ring size for 1G functions */
4089 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4090 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4091 rx_ring_size /= 10;
4092 }
d760fc37 4093
c2188952
VZ
4094 /* allocate at least number of buffers required by FW */
4095 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4096 MIN_RX_SIZE_TPA, rx_ring_size);
4097
4098 bp->rx_ring_size = rx_ring_size;
614c76df 4099 } else /* if rx_ring_size specified - use it */
c2188952 4100 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4101
b3b83c3f
DK
4102 /* Common */
4103 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4104
b3b83c3f 4105 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4106 /* status blocks */
619c5cb6 4107 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4108 BNX2X_PCI_ALLOC(sb->e2_sb,
4109 &bnx2x_fp(bp, index, status_blk_mapping),
4110 sizeof(struct host_hc_status_block_e2));
4111 else
4112 BNX2X_PCI_ALLOC(sb->e1x_sb,
4113 &bnx2x_fp(bp, index, status_blk_mapping),
4114 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4115 }
8eef2af1
DK
4116
4117 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4118 * set shortcuts for it.
4119 */
4120 if (!IS_FCOE_IDX(index))
4121 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4122
4123 /* Tx */
4124 if (!skip_tx_queue(bp, index)) {
4125 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4126 for_each_cos_in_tx_queue(fp, cos) {
65565884 4127 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4128
51c1a580
MS
4129 DP(NETIF_MSG_IFUP,
4130 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4131 index, cos);
4132
4133 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4134 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4135 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4136 &txdata->tx_desc_mapping,
b3b83c3f 4137 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4138 }
b3b83c3f
DK
4139 }
4140
4141 /* Rx */
4142 if (!skip_rx_queue(bp, index)) {
4143 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4144 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4145 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4146 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4147 &bnx2x_fp(bp, index, rx_desc_mapping),
4148 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4149
4150 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4151 &bnx2x_fp(bp, index, rx_comp_mapping),
4152 sizeof(struct eth_fast_path_rx_cqe) *
4153 NUM_RCQ_BD);
4154
4155 /* SGE ring */
4156 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4157 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4158 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4159 &bnx2x_fp(bp, index, rx_sge_mapping),
4160 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4161 /* RX BD ring */
4162 bnx2x_set_next_page_rx_bd(fp);
4163
4164 /* CQ ring */
4165 bnx2x_set_next_page_rx_cq(fp);
4166
4167 /* BDs */
4168 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4169 if (ring_size < rx_ring_size)
4170 goto alloc_mem_err;
4171 }
4172
4173 return 0;
4174
4175/* handles low memory cases */
4176alloc_mem_err:
4177 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4178 index, ring_size);
 4179 /* FW will drop all packets if the queue is not big enough,
 4180 * so in these cases we disable the queue.
6383c0b3 4181 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4182 */
4183 if (ring_size < (fp->disable_tpa ?
eb722d7a 4184 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4185 /* release memory allocated for this queue */
4186 bnx2x_free_fp_mem_at(bp, index);
4187 return -ENOMEM;
4188 }
4189 return 0;
4190}
4191
55c11941
MS
4192int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4193{
4194 if (!NO_FCOE(bp))
4195 /* FCoE */
4196 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4197 /* we will fail the load process instead of marking
 4198 * NO_FCOE_FLAG
4199 */
4200 return -ENOMEM;
4201
4202 return 0;
4203}
4204
b3b83c3f
DK
4205int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4206{
4207 int i;
4208
55c11941
MS
4209 /* 1. Allocate FP for leading - fatal if error
4210 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4211 */
4212
4213 /* leading */
4214 if (bnx2x_alloc_fp_mem_at(bp, 0))
4215 return -ENOMEM;
6383c0b3 4216
b3b83c3f
DK
4217 /* RSS */
4218 for_each_nondefault_eth_queue(bp, i)
4219 if (bnx2x_alloc_fp_mem_at(bp, i))
4220 break;
4221
4222 /* handle memory failures */
4223 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4224 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4225
4226 WARN_ON(delta < 0);
55c11941
MS
4227 if (CNIC_SUPPORT(bp))
 4228 /* move non-eth FPs next to the last eth FP;
 4229 * must be done in that order:
 4230 * FCOE_IDX < FWD_IDX < OOO_IDX
4231 */
b3b83c3f 4232
55c11941
MS
 4233 /* move FCoE fp even if NO_FCOE_FLAG is on */
4234 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4235 bp->num_ethernet_queues -= delta;
4236 bp->num_queues = bp->num_ethernet_queues +
4237 bp->num_cnic_queues;
b3b83c3f
DK
4238 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4239 bp->num_queues + delta, bp->num_queues);
4240 }
4241
4242 return 0;
4243}
d6214d7a 4244
523224a3
DK
4245void bnx2x_free_mem_bp(struct bnx2x *bp)
4246{
15192a8c 4247 kfree(bp->fp->tpa_info);
523224a3 4248 kfree(bp->fp);
15192a8c
BW
4249 kfree(bp->sp_objs);
4250 kfree(bp->fp_stats);
65565884 4251 kfree(bp->bnx2x_txq);
523224a3
DK
4252 kfree(bp->msix_table);
4253 kfree(bp->ilt);
4254}
4255
0329aba1 4256int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4257{
4258 struct bnx2x_fastpath *fp;
4259 struct msix_entry *tbl;
4260 struct bnx2x_ilt *ilt;
6383c0b3 4261 int msix_table_size = 0;
55c11941 4262 int fp_array_size, txq_array_size;
15192a8c 4263 int i;
6383c0b3
AE
4264
4265 /*
 4266 * The biggest MSI-X table we might need is the maximum number of fast
 4267 * path IGU SBs plus the default SB (for PF).
4268 */
1ab4434c
AE
4269 msix_table_size = bp->igu_sb_cnt;
4270 if (IS_PF(bp))
4271 msix_table_size++;
4272 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4273
6383c0b3 4274 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4275 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
15192a8c
BW
4276 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
4277
4278 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4279 if (!fp)
4280 goto alloc_err;
15192a8c
BW
4281 for (i = 0; i < fp_array_size; i++) {
4282 fp[i].tpa_info =
4283 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4284 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4285 if (!(fp[i].tpa_info))
4286 goto alloc_err;
4287 }
4288
523224a3
DK
4289 bp->fp = fp;
4290
15192a8c
BW
4291 /* allocate sp objs */
4292 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4293 GFP_KERNEL);
4294 if (!bp->sp_objs)
4295 goto alloc_err;
4296
4297 /* allocate fp_stats */
4298 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4299 GFP_KERNEL);
4300 if (!bp->fp_stats)
4301 goto alloc_err;
4302
65565884 4303 /* Allocate memory for the transmission queues array */
55c11941
MS
4304 txq_array_size =
4305 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4306 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4307
4308 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4309 GFP_KERNEL);
65565884
MS
4310 if (!bp->bnx2x_txq)
4311 goto alloc_err;
4312
523224a3 4313 /* msix table */
01e23742 4314 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4315 if (!tbl)
4316 goto alloc_err;
4317 bp->msix_table = tbl;
4318
4319 /* ilt */
4320 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4321 if (!ilt)
4322 goto alloc_err;
4323 bp->ilt = ilt;
4324
4325 return 0;
4326alloc_err:
4327 bnx2x_free_mem_bp(bp);
4328 return -ENOMEM;
4329
4330}
4331
a9fccec7 4332int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4333{
4334 struct bnx2x *bp = netdev_priv(dev);
4335
4336 if (unlikely(!netif_running(dev)))
4337 return 0;
4338
5d07d868 4339 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4340 return bnx2x_nic_load(bp, LOAD_NORMAL);
4341}
4342
1ac9e428
YR
4343int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4344{
4345 u32 sel_phy_idx = 0;
4346 if (bp->link_params.num_phys <= 1)
4347 return INT_PHY;
4348
4349 if (bp->link_vars.link_up) {
4350 sel_phy_idx = EXT_PHY1;
4351 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4352 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4353 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4354 sel_phy_idx = EXT_PHY2;
4355 } else {
4356
4357 switch (bnx2x_phy_selection(&bp->link_params)) {
4358 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4359 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4360 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4361 sel_phy_idx = EXT_PHY1;
4362 break;
4363 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4364 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4365 sel_phy_idx = EXT_PHY2;
4366 break;
4367 }
4368 }
4369
4370 return sel_phy_idx;
4371
4372}
4373int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4374{
4375 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4376 /*
 4377 * The selected active PHY is always after swapping (in case PHY
4378 * swapping is enabled). So when swapping is enabled, we need to reverse
4379 * the configuration
4380 */
4381
4382 if (bp->link_params.multi_phy_config &
4383 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4384 if (sel_phy_idx == EXT_PHY1)
4385 sel_phy_idx = EXT_PHY2;
4386 else if (sel_phy_idx == EXT_PHY2)
4387 sel_phy_idx = EXT_PHY1;
4388 }
4389 return LINK_CONFIG_IDX(sel_phy_idx);
4390}
4391
55c11941 4392#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4393int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4394{
4395 struct bnx2x *bp = netdev_priv(dev);
4396 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4397
4398 switch (type) {
4399 case NETDEV_FCOE_WWNN:
4400 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4401 cp->fcoe_wwn_node_name_lo);
4402 break;
4403 case NETDEV_FCOE_WWPN:
4404 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4405 cp->fcoe_wwn_port_name_lo);
4406 break;
4407 default:
51c1a580 4408 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4409 return -EINVAL;
4410 }
4411
4412 return 0;
4413}
4414#endif
4415
9f6c9258
DK
4416/* called with rtnl_lock */
4417int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4418{
4419 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4420
4421 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4422 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4423 return -EAGAIN;
4424 }
4425
4426 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4427 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4428 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4429 return -EINVAL;
51c1a580 4430 }
9f6c9258
DK
4431
4432 /* This does not race with packet allocation
4433 * because the actual alloc size is
4434 * only updated as part of load
4435 */
4436 dev->mtu = new_mtu;
4437
66371c44
MM
4438 return bnx2x_reload_if_running(dev);
4439}
4440
c8f44aff 4441netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4442 netdev_features_t features)
66371c44
MM
4443{
4444 struct bnx2x *bp = netdev_priv(dev);
4445
4446 /* TPA requires Rx CSUM offloading */
621b4d66 4447 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4448 features &= ~NETIF_F_LRO;
621b4d66
DK
4449 features &= ~NETIF_F_GRO;
4450 }
66371c44
MM
4451
4452 return features;
4453}
4454
c8f44aff 4455int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4456{
4457 struct bnx2x *bp = netdev_priv(dev);
4458 u32 flags = bp->flags;
538dd2e3 4459 bool bnx2x_reload = false;
66371c44
MM
4460
4461 if (features & NETIF_F_LRO)
4462 flags |= TPA_ENABLE_FLAG;
4463 else
4464 flags &= ~TPA_ENABLE_FLAG;
4465
621b4d66
DK
4466 if (features & NETIF_F_GRO)
4467 flags |= GRO_ENABLE_FLAG;
4468 else
4469 flags &= ~GRO_ENABLE_FLAG;
4470
538dd2e3
MB
4471 if (features & NETIF_F_LOOPBACK) {
4472 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4473 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4474 bnx2x_reload = true;
4475 }
4476 } else {
4477 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4478 bp->link_params.loopback_mode = LOOPBACK_NONE;
4479 bnx2x_reload = true;
4480 }
4481 }
4482
66371c44
MM
4483 if (flags ^ bp->flags) {
4484 bp->flags = flags;
538dd2e3
MB
4485 bnx2x_reload = true;
4486 }
66371c44 4487
538dd2e3 4488 if (bnx2x_reload) {
66371c44
MM
4489 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4490 return bnx2x_reload_if_running(dev);
4491 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4492 }
4493
66371c44 4494 return 0;
9f6c9258
DK
4495}
4496
4497void bnx2x_tx_timeout(struct net_device *dev)
4498{
4499 struct bnx2x *bp = netdev_priv(dev);
4500
4501#ifdef BNX2X_STOP_ON_ERROR
4502 if (!bp->panic)
4503 bnx2x_panic();
4504#endif
7be08a72
AE
4505
4506 smp_mb__before_clear_bit();
4507 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4508 smp_mb__after_clear_bit();
4509
9f6c9258 4510 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4511 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4512}
4513
9f6c9258
DK
4514int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4515{
4516 struct net_device *dev = pci_get_drvdata(pdev);
4517 struct bnx2x *bp;
4518
4519 if (!dev) {
4520 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4521 return -ENODEV;
4522 }
4523 bp = netdev_priv(dev);
4524
4525 rtnl_lock();
4526
4527 pci_save_state(pdev);
4528
4529 if (!netif_running(dev)) {
4530 rtnl_unlock();
4531 return 0;
4532 }
4533
4534 netif_device_detach(dev);
4535
5d07d868 4536 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4537
4538 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4539
4540 rtnl_unlock();
4541
4542 return 0;
4543}
4544
4545int bnx2x_resume(struct pci_dev *pdev)
4546{
4547 struct net_device *dev = pci_get_drvdata(pdev);
4548 struct bnx2x *bp;
4549 int rc;
4550
4551 if (!dev) {
4552 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4553 return -ENODEV;
4554 }
4555 bp = netdev_priv(dev);
4556
4557 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4558 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4559 return -EAGAIN;
4560 }
4561
4562 rtnl_lock();
4563
4564 pci_restore_state(pdev);
4565
4566 if (!netif_running(dev)) {
4567 rtnl_unlock();
4568 return 0;
4569 }
4570
4571 bnx2x_set_power_state(bp, PCI_D0);
4572 netif_device_attach(dev);
4573
4574 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4575
4576 rtnl_unlock();
4577
4578 return rc;
4579}
619c5cb6
VZ
4580
4581
4582void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4583 u32 cid)
4584{
4585 /* ustorm cxt validation */
4586 cxt->ustorm_ag_context.cdu_usage =
4587 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4588 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4589 /* xcontext validation */
4590 cxt->xstorm_ag_context.cdu_reserved =
4591 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4592 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4593}
4594
1191cb83
ED
4595static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4596 u8 fw_sb_id, u8 sb_index,
4597 u8 ticks)
619c5cb6
VZ
4598{
4599
4600 u32 addr = BAR_CSTRORM_INTMEM +
4601 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4602 REG_WR8(bp, addr, ticks);
51c1a580
MS
4603 DP(NETIF_MSG_IFUP,
4604 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4605 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4606}
4607
1191cb83
ED
4608static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4609 u16 fw_sb_id, u8 sb_index,
4610 u8 disable)
619c5cb6
VZ
4611{
4612 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4613 u32 addr = BAR_CSTRORM_INTMEM +
4614 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4615 u16 flags = REG_RD16(bp, addr);
4616 /* clear and set */
4617 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4618 flags |= enable_flag;
4619 REG_WR16(bp, addr, flags);
51c1a580
MS
4620 DP(NETIF_MSG_IFUP,
4621 "port %x fw_sb_id %d sb_index %d disable %d\n",
4622 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4623}
4624
4625void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4626 u8 sb_index, u8 disable, u16 usec)
4627{
4628 int port = BP_PORT(bp);
4629 u8 ticks = usec / BNX2X_BTR;
4630
4631 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4632
4633 disable = disable ? 1 : (usec ? 0 : 1);
4634 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4635}
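
/* Illustrative sketch, with names used elsewhere in the driver and the exact
 * call site treated as an assumption: coalescing updates are driven per
 * status-block index from the ethtool set_coalesce path, for instance
 *
 *	bnx2x_update_coalesce_sb_index(bp, fp->fw_sb_id,
 *				       HC_INDEX_ETH_RX_CQ_CONS,
 *				       false, bp->rx_ticks);
 *
 * i.e. the microsecond value from "ethtool -C ethX rx-usecs N" is converted
 * to BNX2X_BTR ticks here and written into the storm memory for that SB.
 */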