/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
25 #include <net/ip6_checksum.h>
26 #include <linux/prefetch.h>
27 #include "bnx2x_cmn.h"
28 #include "bnx2x_init.h"
/* NOTE(review): elided/mangled dump (embedded line numbers, missing braces
 * and the non-FCOE else branch). Code kept byte-identical; comments only.
 */
34 * bnx2x_move_fp - move content of the fastpath structure.
37 * @from: source FP index
38 * @to: destination FP index
40 * Makes sure the contents of the bp->fp[to].napi is kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
46 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
78 if (from == FCOE_IDX(bp)) {
/* NOTE(review): only the FCOE branch survives in this dump; the regular
 * (non-FCOE) old/new txdata index computation is elided here.
 */
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
/* Per-path reference counts of loaded functions, used to decide when
 * common/port-level HW init or teardown must run.
 */
89 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* NOTE(review): elided dump - the data-BD loop header, #endif and the final
 * return of new_cons are missing here. Code kept byte-identical.
 */
91 /* free skb in the packet ring at pos idx
92 * return idx of last bd freed
94 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
105 /* prefetch skb end pointer to speedup dev_kfree_skb() */
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
/* Unmap the start BD first - it was mapped with dma_map_single() */
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
/* nbd counts all BDs of the packet; minus one for the start BD itself */
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118 #ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
124 new_cons = nbd + tx_buf->first_bd;
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
129 /* Skip a parse bd... */
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* Data BDs were mapped with dma_map_page() - unmap them accordingly */
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* Accumulate BQL completion accounting for the caller */
153 (*bytes_compl) += skb->len;
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
/* NOTE(review): elided dump (missing braces, smp_mb(), returns). The
 * lock-free producer/consumer protocol with start_xmit() depends on exact
 * statement order and the elided memory barrier - code kept byte-identical.
 */
163 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
169 #ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
/* Walk completed packets between the SW consumer and the HW consumer */
178 while (sw_cons != hw_cons) {
181 pkt_cons = TX_BD(sw_cons);
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
/* Report completions to BQL */
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent reenabling the queue
211 * while it's empty. This could have happen if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
220 __netif_tx_lock(txq, smp_processor_id());
/* Re-check under the lock before waking the queue */
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
227 __netif_tx_unlock(txq);
232 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
235 u16 last_max = fp->last_max_sge;
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
/* NOTE(review): elided dump (missing local decls such as i, delta, sge_len
 * and several braces). Bit-mask bookkeeping is order sensitive - code kept
 * byte-identical, comments only.
 */
241 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
243 struct eth_end_agg_rx_cqe *cqe)
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
274 /* Now update the prod */
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
/* Fully-consumed mask element: refill it and advance the producer */
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
294 /* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
297 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
/* NOTE(review): elided dump (missing params/braces, fence/#else lines).
 * BD ownership hand-off order matters - code kept byte-identical.
 */
315 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
317 struct eth_fast_path_rx_cqe *cqe)
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
/* GRO mode: remember how much of each SGE page a full segment uses */
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
373 #ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375 #ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
384 /* Timestamp option length allowed for TPA aggregation:
386 * nop nop kind length echo val
388 #define TPA_TSTAMP_OPT_LEN 12
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the
397 * Approximate value of the MSS for this aggregation calculated using
398 * the first packet of it.
400 static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
404 * TPA arrgregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
413 hdrs_len += sizeof(struct iphdr);
416 /* Check if there was a TCP timestamp, if there is it's will
417 * always be 12 bytes length: nop nop kind length echo val.
419 * Otherwise FW would close the aggregation.
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
424 return len_on_bd - hdrs_len;
427 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
/* NOTE(review): elided dump (missing params, loop braces, returns and the
 * GRO offset bookkeeping). Code kept byte-identical, comments only.
 */
457 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
461 struct eth_end_agg_rx_cqe *cqe,
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
477 /* This is needed in order to enable forwarding support */
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
492 #ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
514 rx_pg = &fp->rx_page_ring[sge_idx];
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
525 /* Unmap the page as we r going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
/* GRO mode: split the SGE chunk into gro_size-sized frags, each one
 * taking an extra page reference since they share the same page.
 */
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
540 get_page(old_rx_pg.page);
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
549 frag_size -= frag_len;
555 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
557 if (fp->rx_frag_size)
558 put_page(virt_to_head_page(data));
563 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
565 if (fp->rx_frag_size)
566 return netdev_alloc_frag(fp->rx_frag_size);
568 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
/* NOTE(review): elided dump (missing params, braces, goto labels and the
 * skb_put() path). Buffer ownership hand-off is order sensitive - code kept
 * byte-identical, comments only.
 */
572 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
573 struct bnx2x_agg_info *tpa_info,
575 struct eth_end_agg_rx_cqe *cqe,
578 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
579 u8 pad = tpa_info->placement_offset;
580 u16 len = tpa_info->len_on_bd;
581 struct sk_buff *skb = NULL;
582 u8 *new_data, *data = rx_buf->data;
583 u8 old_tpa_state = tpa_info->tpa_state;
585 tpa_info->tpa_state = BNX2X_TPA_STOP;
587 /* If we there was an error during the handling of the TPA_START -
588 * drop this aggregation.
590 if (old_tpa_state == BNX2X_TPA_ERROR)
593 /* Try to allocate the new data */
594 new_data = bnx2x_frag_alloc(fp);
595 /* Unmap skb in the pool anyway, as we are going to change
596 pool entry status to BNX2X_TPA_STOP even if new skb allocation
598 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
599 fp->rx_buf_size, DMA_FROM_DEVICE);
600 if (likely(new_data))
601 skb = build_skb(data, fp->rx_frag_size);
604 #ifdef BNX2X_STOP_ON_ERROR
605 if (pad + len > fp->rx_buf_size) {
606 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
607 pad, len, fp->rx_buf_size);
613 skb_reserve(skb, pad + NET_SKB_PAD);
615 skb->rxhash = tpa_info->rxhash;
616 skb->l4_rxhash = tpa_info->l4_rxhash;
618 skb->protocol = eth_type_trans(skb, bp->dev);
/* HW validated the aggregated TCP payload checksum */
619 skb->ip_summed = CHECKSUM_UNNECESSARY;
621 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
622 skb, cqe, cqe_idx)) {
623 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
624 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
625 napi_gro_receive(&fp->napi, skb);
627 DP(NETIF_MSG_RX_STATUS,
628 "Failed to allocate new pages - dropping packet!\n");
629 dev_kfree_skb_any(skb);
633 /* put new data in bin */
634 rx_buf->data = new_data;
/* Error path: keep the old buffer in the bin and free the spare */
638 bnx2x_frag_free(fp, new_data);
640 /* drop the packet and keep the buffer in the bin */
641 DP(NETIF_MSG_RX_STATUS,
642 "Failed to allocate or map a new skb - dropping packet!\n");
643 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
646 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
647 struct bnx2x_fastpath *fp, u16 index)
650 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
651 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
654 data = bnx2x_frag_alloc(fp);
655 if (unlikely(data == NULL))
658 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
662 bnx2x_frag_free(fp, data);
663 BNX2X_ERR("Can't map rx data\n");
668 dma_unmap_addr_set(rx_buf, mapping, mapping);
670 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
671 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
677 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
678 struct bnx2x_fastpath *fp,
679 struct bnx2x_eth_q_stats *qstats)
681 /* Do nothing if no L4 csum validation was done.
682 * We do not check whether IP csum was validated. For IPv4 we assume
683 * that if the card got as far as validating the L4 csum, it also
684 * validated the IP csum. IPv6 has no IP csum.
686 if (cqe->fast_path_cqe.status_flags &
687 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
690 /* If L4 validation was done, check if an error was found. */
692 if (cqe->fast_path_cqe.type_error_flags &
693 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
694 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
695 qstats->hw_csum_err++;
697 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* NOTE(review): elided dump - many control-flow lines (gotos, else
 * branches, loop braces, rmb(), returns) are missing. The rx completion
 * protocol is order sensitive; code kept byte-identical, comments only.
 */
700 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
702 struct bnx2x *bp = fp->bp;
703 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
704 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
707 #ifdef BNX2X_STOP_ON_ERROR
708 if (unlikely(bp->panic))
712 /* CQ "next element" is of the size of the regular element,
713 that's why it's ok here */
714 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
715 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
718 bd_cons = fp->rx_bd_cons;
719 bd_prod = fp->rx_bd_prod;
720 bd_prod_fw = bd_prod;
721 sw_comp_cons = fp->rx_comp_cons;
722 sw_comp_prod = fp->rx_comp_prod;
724 /* Memory barrier necessary as speculative reads of the rx
725 * buffer can be ahead of the index in the status block
729 DP(NETIF_MSG_RX_STATUS,
730 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
731 fp->index, hw_comp_cons, sw_comp_cons);
/* Main completion loop - one iteration per CQE */
733 while (sw_comp_cons != hw_comp_cons) {
734 struct sw_rx_bd *rx_buf = NULL;
736 union eth_rx_cqe *cqe;
737 struct eth_fast_path_rx_cqe *cqe_fp;
739 enum eth_rx_cqe_type cqe_fp_type;
744 #ifdef BNX2X_STOP_ON_ERROR
745 if (unlikely(bp->panic))
749 comp_ring_cons = RCQ_BD(sw_comp_cons);
750 bd_prod = RX_BD(bd_prod);
751 bd_cons = RX_BD(bd_cons);
753 cqe = &fp->rx_comp_ring[comp_ring_cons];
754 cqe_fp = &cqe->fast_path_cqe;
755 cqe_fp_flags = cqe_fp->type_error_flags;
756 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
758 DP(NETIF_MSG_RX_STATUS,
759 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
760 CQE_TYPE(cqe_fp_flags),
761 cqe_fp_flags, cqe_fp->status_flags,
762 le32_to_cpu(cqe_fp->rss_hash_result),
763 le16_to_cpu(cqe_fp->vlan_tag),
764 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
766 /* is this a slowpath msg? */
767 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
768 bnx2x_sp_event(fp, cqe);
772 rx_buf = &fp->rx_buf_ring[bd_cons];
/* Non-fast CQE: TPA start/stop aggregation handling */
775 if (!CQE_TYPE_FAST(cqe_fp_type)) {
776 struct bnx2x_agg_info *tpa_info;
777 u16 frag_size, pages;
778 #ifdef BNX2X_STOP_ON_ERROR
780 if (fp->disable_tpa &&
781 (CQE_TYPE_START(cqe_fp_type) ||
782 CQE_TYPE_STOP(cqe_fp_type)))
783 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
784 CQE_TYPE(cqe_fp_type));
787 if (CQE_TYPE_START(cqe_fp_type)) {
788 u16 queue = cqe_fp->queue_index;
789 DP(NETIF_MSG_RX_STATUS,
790 "calling tpa_start on queue %d\n",
793 bnx2x_tpa_start(fp, queue,
800 queue = cqe->end_agg_cqe.queue_index;
801 tpa_info = &fp->tpa_info[queue];
802 DP(NETIF_MSG_RX_STATUS,
803 "calling tpa_stop on queue %d\n",
806 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
809 if (fp->mode == TPA_MODE_GRO)
810 pages = (frag_size + tpa_info->full_page - 1) /
813 pages = SGE_PAGE_ALIGN(frag_size) >>
816 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
817 &cqe->end_agg_cqe, comp_ring_cons);
818 #ifdef BNX2X_STOP_ON_ERROR
823 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
/* Fast-path CQE: a regular (non-TPA) packet */
827 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
828 pad = cqe_fp->placement_offset;
829 dma_sync_single_for_cpu(&bp->pdev->dev,
830 dma_unmap_addr(rx_buf, mapping),
831 pad + RX_COPY_THRESH,
834 prefetch(data + pad); /* speedup eth_type_trans() */
835 /* is this an error packet? */
836 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
837 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
838 "ERROR flags %x rx packet %u\n",
839 cqe_fp_flags, sw_comp_cons);
840 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
844 /* Since we don't have a jumbo ring
845 * copy small packets if mtu > 1500
847 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
848 (len <= RX_COPY_THRESH)) {
849 skb = netdev_alloc_skb_ip_align(bp->dev, len);
851 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
852 "ERROR packet dropped because of alloc failure\n");
853 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
856 memcpy(skb->data, data + pad, len);
857 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
/* Otherwise hand the buffer itself to the stack after a refill */
859 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
860 dma_unmap_single(&bp->pdev->dev,
861 dma_unmap_addr(rx_buf, mapping),
864 skb = build_skb(data, fp->rx_frag_size);
865 if (unlikely(!skb)) {
866 bnx2x_frag_free(fp, data);
867 bnx2x_fp_qstats(bp, fp)->
868 rx_skb_alloc_failed++;
871 skb_reserve(skb, pad);
873 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
874 "ERROR packet dropped because of alloc failure\n");
875 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
877 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
883 skb->protocol = eth_type_trans(skb, bp->dev);
885 /* Set Toeplitz hash for a none-LRO skb */
886 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
887 skb->l4_rxhash = l4_rxhash;
889 skb_checksum_none_assert(skb);
891 if (bp->dev->features & NETIF_F_RXCSUM)
892 bnx2x_csum_validate(skb, cqe, fp,
893 bnx2x_fp_qstats(bp, fp));
895 skb_record_rx_queue(skb, fp->rx_queue);
897 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
899 __vlan_hwaccel_put_tag(skb,
900 le16_to_cpu(cqe_fp->vlan_tag));
901 napi_gro_receive(&fp->napi, skb);
/* Advance all ring indices for the next iteration */
907 bd_cons = NEXT_RX_IDX(bd_cons);
908 bd_prod = NEXT_RX_IDX(bd_prod);
909 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
912 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
913 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
915 if (rx_pkt == budget)
919 fp->rx_bd_cons = bd_cons;
920 fp->rx_bd_prod = bd_prod_fw;
921 fp->rx_comp_cons = sw_comp_cons;
922 fp->rx_comp_prod = sw_comp_prod;
924 /* Update producers */
925 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
928 fp->rx_pkt += rx_pkt;
/* NOTE(review): elided dump (missing braces and the IRQ_HANDLED returns).
 * Code kept byte-identical, comments only.
 */
934 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
936 struct bnx2x_fastpath *fp = fp_cookie;
937 struct bnx2x *bp = fp->bp;
941 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
942 fp->index, fp->fw_sb_id, fp->igu_sb_id);
/* Mask the SB interrupt; NAPI poll will re-enable it when done */
943 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
945 #ifdef BNX2X_STOP_ON_ERROR
946 if (unlikely(bp->panic))
950 /* Handle Rx and Tx according to MSI-X vector */
951 prefetch(fp->rx_cons_sb);
953 for_each_cos_in_tx_queue(fp, cos)
954 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
956 prefetch(&fp->sb_running_index[SM_RX_ID]);
/* Defer the actual rx/tx work to NAPI context */
957 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
962 /* HW Lock for shared dual port PHYs */
963 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
965 mutex_lock(&bp->port.phy_mutex);
967 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
970 void bnx2x_release_phy_lock(struct bnx2x *bp)
972 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
974 mutex_unlock(&bp->port.phy_mutex);
/* NOTE(review): elided dump - the IS_MF()/IS_MF_SI() branch structure and
 * the final return of line_speed are missing here. Code kept byte-identical.
 */
977 /* calculates MF speed according to current linespeed and MF configuration */
978 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
980 u16 line_speed = bp->link_vars.line_speed;
982 u16 maxCfg = bnx2x_extract_max_cfg(bp,
983 bp->mf_config[BP_VN(bp)]);
985 /* Calculate the current MAX line speed limit for the MF
/* Percentage-based limit (presumably the SI-mode branch - the guarding
 * condition is elided from this dump; confirm against the full source).
 */
989 line_speed = (line_speed * maxCfg) / 100;
/* Absolute rate limit in Mbps (maxCfg is in 100 Mbps units) */
991 u16 vn_max_rate = maxCfg * 100;
993 if (vn_max_rate < line_speed)
994 line_speed = vn_max_rate;
1002 * bnx2x_fill_report_data - fill link report data to report
1004 * @bp: driver handle
1005 * @data: link state to update
1007 * It uses a none-atomic bit operations because is called under the mutex.
1009 static void bnx2x_fill_report_data(struct bnx2x *bp,
1010 struct bnx2x_link_report_data *data)
1012 u16 line_speed = bnx2x_get_mf_speed(bp);
1014 memset(data, 0, sizeof(*data));
1016 /* Fill the report data: efective line speed */
1017 data->line_speed = line_speed;
1020 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1021 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1022 &data->link_report_flags);
1025 if (bp->link_vars.duplex == DUPLEX_FULL)
1026 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1028 /* Rx Flow Control is ON */
1029 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1030 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1032 /* Tx Flow Control is ON */
1033 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1034 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Runs __bnx2x_link_report() under the PHY lock - the same locking scheme
 * used by the link/PHY state management code - so the reported state is
 * consistent.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/* NOTE(review): elided dump (missing duplex/flow local decls, "half"/"full"
 * strings, returns and several else branches). Code kept byte-identical.
 */
1055 * __bnx2x_link_report - report link status to OS.
1057 * @bp: driver handle
1059 * None atomic inmlementation.
1060 * Should be called under the phy_lock.
1062 void __bnx2x_link_report(struct bnx2x *bp)
1064 struct bnx2x_link_report_data cur_data;
/* Re-read the MF configuration first (not available on E1 chips) */
1067 if (!CHIP_IS_E1(bp))
1068 bnx2x_read_mf_cfg(bp);
1070 /* Read the current link report info */
1071 bnx2x_fill_report_data(bp, &cur_data);
1073 /* Don't report link down or exactly the same link status twice */
1074 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1075 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1076 &bp->last_reported_link.link_report_flags) &&
1077 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1078 &cur_data.link_report_flags)))
1083 /* We are going to report a new link parameters now -
1084 * remember the current data for the next time.
1086 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1088 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1089 &cur_data.link_report_flags)) {
1090 netif_carrier_off(bp->dev);
1091 netdev_err(bp->dev, "NIC Link is Down\n");
/* Link-up path: announce carrier and build the human-readable report */
1097 netif_carrier_on(bp->dev);
1099 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1100 &cur_data.link_report_flags))
1105 /* Handle the FC at the end so that only these flags would be
1106 * possibly set. This way we may easily check if there is no FC
1109 if (cur_data.link_report_flags) {
1110 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1111 &cur_data.link_report_flags)) {
1112 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1113 &cur_data.link_report_flags))
1114 flow = "ON - receive & transmit";
1116 flow = "ON - receive";
1118 flow = "ON - transmit";
1123 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1124 cur_data.line_speed, duplex, flow);
1128 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1132 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1133 struct eth_rx_sge *sge;
1135 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1137 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1138 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1141 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1142 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1146 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147 struct bnx2x_fastpath *fp, int last)
1151 for (i = 0; i < last; i++) {
1152 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1153 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1154 u8 *data = first_buf->data;
1157 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1160 if (tpa_info->tpa_state == BNX2X_TPA_START)
1161 dma_unmap_single(&bp->pdev->dev,
1162 dma_unmap_addr(first_buf, mapping),
1163 fp->rx_buf_size, DMA_FROM_DEVICE);
1164 bnx2x_frag_free(fp, data);
1165 first_buf->data = NULL;
/* NOTE(review): elided dump (missing local decls, braces and possibly
 * consumer-index reset lines). Code kept byte-identical, comments only.
 */
1169 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1173 for_each_rx_queue_cnic(bp, j) {
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1178 /* Activate BD ring */
1180 * this will generate an interrupt (to the TSTORM)
1181 * must only be done after chip is initialized
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
/* NOTE(review): elided dump (missing braces, ring_prod handling and the
 * per-queue BD/CQ ring setup between the two loops). Code kept
 * byte-identical, comments only.
 */
1188 void bnx2x_init_rx_rings(struct bnx2x *bp)
1190 int func = BP_FUNC(bp);
1194 /* Allocate TPA resources */
1195 for_each_eth_queue(bp, j) {
1196 struct bnx2x_fastpath *fp = &bp->fp[j];
1199 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1201 if (!fp->disable_tpa) {
1202 /* Fill the per-aggregtion pool */
1203 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1204 struct bnx2x_agg_info *tpa_info =
1206 struct sw_rx_bd *first_buf =
1207 &tpa_info->first_buf;
1209 first_buf->data = bnx2x_frag_alloc(fp);
1210 if (!first_buf->data) {
1211 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
/* Allocation failure: roll back this queue's pool and turn TPA off */
1213 bnx2x_free_tpa_pool(bp, fp, i);
1214 fp->disable_tpa = 1;
1217 dma_unmap_addr_set(first_buf, mapping, 0);
1218 tpa_info->tpa_state = BNX2X_TPA_STOP;
1221 /* "next page" elements initialization */
1222 bnx2x_set_next_page_sgl(fp);
1224 /* set SGEs bit mask */
1225 bnx2x_init_sge_ring_bit_mask(fp);
1227 /* Allocate SGEs and initialize the ring elements */
1228 for (i = 0, ring_prod = 0;
1229 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1231 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1232 BNX2X_ERR("was only able to allocate %d rx sges\n",
1234 BNX2X_ERR("disabling TPA for queue[%d]\n",
1236 /* Cleanup already allocated elements */
1237 bnx2x_free_rx_sge_range(bp, fp,
1239 bnx2x_free_tpa_pool(bp, fp,
1241 fp->disable_tpa = 1;
1245 ring_prod = NEXT_SGE_IDX(ring_prod);
1248 fp->rx_sge_prod = ring_prod;
1252 for_each_eth_queue(bp, j) {
1253 struct bnx2x_fastpath *fp = &bp->fp[j];
1257 /* Activate BD ring */
1259 * this will generate an interrupt (to the TSTORM)
1260 * must only be done after chip is initialized
1262 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
/* E1 HW workaround: tell the USTORM where the CQ ring lives */
1268 if (CHIP_IS_E1(bp)) {
1269 REG_WR(bp, BAR_USTRORM_INTMEM +
1270 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1271 U64_LO(fp->rx_comp_mapping));
1272 REG_WR(bp, BAR_USTRORM_INTMEM +
1273 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1274 U64_HI(fp->rx_comp_mapping));
1279 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1282 struct bnx2x *bp = fp->bp;
1284 for_each_cos_in_tx_queue(fp, cos) {
1285 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1286 unsigned pkts_compl = 0, bytes_compl = 0;
1288 u16 sw_prod = txdata->tx_pkt_prod;
1289 u16 sw_cons = txdata->tx_pkt_cons;
1291 while (sw_cons != sw_prod) {
1292 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1293 &pkts_compl, &bytes_compl);
1297 netdev_tx_reset_queue(
1298 netdev_get_tx_queue(bp->dev,
1299 txdata->txq_index));
1303 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1307 for_each_tx_queue_cnic(bp, i) {
1308 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1312 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1316 for_each_eth_queue(bp, i) {
1317 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1321 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1323 struct bnx2x *bp = fp->bp;
1326 /* ring wasn't allocated */
1327 if (fp->rx_buf_ring == NULL)
1330 for (i = 0; i < NUM_RX_BD; i++) {
1331 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1332 u8 *data = rx_buf->data;
1336 dma_unmap_single(&bp->pdev->dev,
1337 dma_unmap_addr(rx_buf, mapping),
1338 fp->rx_buf_size, DMA_FROM_DEVICE);
1340 rx_buf->data = NULL;
1341 bnx2x_frag_free(fp, data);
1345 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1349 for_each_rx_queue_cnic(bp, j) {
1350 bnx2x_free_rx_bds(&bp->fp[j]);
1354 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1358 for_each_eth_queue(bp, j) {
1359 struct bnx2x_fastpath *fp = &bp->fp[j];
1361 bnx2x_free_rx_bds(fp);
1363 if (!fp->disable_tpa)
1364 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1368 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1370 bnx2x_free_tx_skbs_cnic(bp);
1371 bnx2x_free_rx_skbs_cnic(bp);
1374 void bnx2x_free_skbs(struct bnx2x *bp)
1376 bnx2x_free_tx_skbs(bp);
1377 bnx2x_free_rx_skbs(bp);
1380 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1382 /* load old values */
1383 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1385 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1386 /* leave all but MAX value */
1387 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1389 /* set new MAX value */
1390 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1391 & FUNC_MF_CFG_MAX_BW_MASK;
1393 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1398 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1400 * @bp: driver handle
1401 * @nvecs: number of vectors to be released
1403 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1407 if (nvecs == offset)
1409 free_irq(bp->msix_table[offset].vector, bp->dev);
1410 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1411 bp->msix_table[offset].vector);
1414 if (CNIC_SUPPORT(bp)) {
1415 if (nvecs == offset)
1420 for_each_eth_queue(bp, i) {
1421 if (nvecs == offset)
1423 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1424 i, bp->msix_table[offset].vector);
1426 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1430 void bnx2x_free_irq(struct bnx2x *bp)
1432 if (bp->flags & USING_MSIX_FLAG &&
1433 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1434 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1435 CNIC_SUPPORT(bp) + 1);
1437 free_irq(bp->dev->irq, bp->dev);
1440 int bnx2x_enable_msix(struct bnx2x *bp)
1442 int msix_vec = 0, i, rc, req_cnt;
1444 bp->msix_table[msix_vec].entry = msix_vec;
1445 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1446 bp->msix_table[0].entry);
1449 /* Cnic requires an msix vector for itself */
1450 if (CNIC_SUPPORT(bp)) {
1451 bp->msix_table[msix_vec].entry = msix_vec;
1452 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1453 msix_vec, bp->msix_table[msix_vec].entry);
1457 /* We need separate vectors for ETH queues only (not FCoE) */
1458 for_each_eth_queue(bp, i) {
1459 bp->msix_table[msix_vec].entry = msix_vec;
1460 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1461 msix_vec, msix_vec, i);
1465 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1467 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1470 * reconfigure number of tx/rx queues according to available
1473 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1474 /* how less vectors we will have? */
1475 int diff = req_cnt - rc;
1477 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1479 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1482 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1486 * decrease number of queues by number of unallocated entries
1488 bp->num_ethernet_queues -= diff;
1489 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1491 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1493 } else if (rc > 0) {
1494 /* Get by with single vector */
1495 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1497 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1502 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1503 bp->flags |= USING_SINGLE_MSIX_FLAG;
1505 BNX2X_DEV_INFO("set number of queues to 1\n");
1506 bp->num_ethernet_queues = 1;
1507 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1508 } else if (rc < 0) {
1509 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1513 bp->flags |= USING_MSIX_FLAG;
1518 /* fall to INTx if not enough memory */
1520 bp->flags |= DISABLE_MSI_FLAG;
1525 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1527 int i, rc, offset = 0;
1529 rc = request_irq(bp->msix_table[offset++].vector,
1530 bnx2x_msix_sp_int, 0,
1531 bp->dev->name, bp->dev);
1533 BNX2X_ERR("request sp irq failed\n");
1537 if (CNIC_SUPPORT(bp))
1540 for_each_eth_queue(bp, i) {
1541 struct bnx2x_fastpath *fp = &bp->fp[i];
1542 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1545 rc = request_irq(bp->msix_table[offset].vector,
1546 bnx2x_msix_fp_int, 0, fp->name, fp);
1548 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1549 bp->msix_table[offset].vector, rc);
1550 bnx2x_free_msix_irqs(bp, offset);
1557 i = BNX2X_NUM_ETH_QUEUES(bp);
1558 offset = 1 + CNIC_SUPPORT(bp);
1559 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1560 bp->msix_table[0].vector,
1561 0, bp->msix_table[offset].vector,
1562 i - 1, bp->msix_table[offset + i - 1].vector);
1567 int bnx2x_enable_msi(struct bnx2x *bp)
1571 rc = pci_enable_msi(bp->pdev);
1573 BNX2X_DEV_INFO("MSI is not attainable\n");
1576 bp->flags |= USING_MSI_FLAG;
1581 static int bnx2x_req_irq(struct bnx2x *bp)
1583 unsigned long flags;
1586 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1589 flags = IRQF_SHARED;
1591 if (bp->flags & USING_MSIX_FLAG)
1592 irq = bp->msix_table[0].vector;
1594 irq = bp->pdev->irq;
1596 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1599 static int bnx2x_setup_irqs(struct bnx2x *bp)
1602 if (bp->flags & USING_MSIX_FLAG &&
1603 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1604 rc = bnx2x_req_msix_irqs(bp);
1609 rc = bnx2x_req_irq(bp);
1611 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1614 if (bp->flags & USING_MSI_FLAG) {
1615 bp->dev->irq = bp->pdev->irq;
1616 netdev_info(bp->dev, "using MSI IRQ %d\n",
1619 if (bp->flags & USING_MSIX_FLAG) {
1620 bp->dev->irq = bp->msix_table[0].vector;
1621 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1629 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1633 for_each_rx_queue_cnic(bp, i)
1634 napi_enable(&bnx2x_fp(bp, i, napi));
1637 static void bnx2x_napi_enable(struct bnx2x *bp)
1641 for_each_eth_queue(bp, i)
1642 napi_enable(&bnx2x_fp(bp, i, napi));
1645 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1649 for_each_rx_queue_cnic(bp, i)
1650 napi_disable(&bnx2x_fp(bp, i, napi));
1653 static void bnx2x_napi_disable(struct bnx2x *bp)
1657 for_each_eth_queue(bp, i)
1658 napi_disable(&bnx2x_fp(bp, i, napi));
1661 void bnx2x_netif_start(struct bnx2x *bp)
1663 if (netif_running(bp->dev)) {
1664 bnx2x_napi_enable(bp);
1665 if (CNIC_LOADED(bp))
1666 bnx2x_napi_enable_cnic(bp);
1667 bnx2x_int_enable(bp);
1668 if (bp->state == BNX2X_STATE_OPEN)
1669 netif_tx_wake_all_queues(bp->dev);
1673 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1675 bnx2x_int_disable_sync(bp, disable_hw);
1676 bnx2x_napi_disable(bp);
1677 if (CNIC_LOADED(bp))
1678 bnx2x_napi_disable_cnic(bp);
1681 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1683 struct bnx2x *bp = netdev_priv(dev);
1685 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1686 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1687 u16 ether_type = ntohs(hdr->h_proto);
1689 /* Skip VLAN tag if present */
1690 if (ether_type == ETH_P_8021Q) {
1691 struct vlan_ethhdr *vhdr =
1692 (struct vlan_ethhdr *)skb->data;
1694 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1697 /* If ethertype is FCoE or FIP - use FCoE ring */
1698 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1699 return bnx2x_fcoe_tx(bp, txq_index);
1702 /* select a non-FCoE queue */
1703 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1707 void bnx2x_set_num_queues(struct bnx2x *bp)
1710 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1712 /* override in STORAGE SD modes */
1713 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1714 bp->num_ethernet_queues = 1;
1716 /* Add special queues */
1717 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1720 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1724 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1726 * @bp: Driver handle
1728 * We currently support for at most 16 Tx queues for each CoS thus we will
1729 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1732 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1733 * index after all ETH L2 indices.
1735 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1736 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1737 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1739 * The proper configuration of skb->queue_mapping is handled by
1740 * bnx2x_select_queue() and __skb_tx_hash().
1742 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1743 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1745 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1749 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1750 rx = BNX2X_NUM_ETH_QUEUES(bp);
1752 /* account for fcoe queue */
1753 if (include_cnic && !NO_FCOE(bp)) {
1758 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1760 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1763 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1765 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1769 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1775 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1779 for_each_queue(bp, i) {
1780 struct bnx2x_fastpath *fp = &bp->fp[i];
1783 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1786 * Although there are no IP frames expected to arrive to
1787 * this ring we still want to add an
1788 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1791 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1794 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1795 IP_HEADER_ALIGNMENT_PADDING +
1798 BNX2X_FW_RX_ALIGN_END;
1799 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
1800 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1801 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1803 fp->rx_frag_size = 0;
1807 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1810 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1812 /* Prepare the initial contents fo the indirection table if RSS is
1815 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1816 bp->rss_conf_obj.ind_table[i] =
1818 ethtool_rxfh_indir_default(i, num_eth_queues);
1821 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1822 * per-port, so if explicit configuration is needed , do it only
1825 * For 57712 and newer on the other hand it's a per-function
1828 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1831 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1834 struct bnx2x_config_rss_params params = {NULL};
1837 /* Although RSS is meaningless when there is a single HW queue we
1838 * still need it enabled in order to have HW Rx hash generated.
1840 * if (!is_eth_multi(bp))
1841 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1844 params.rss_obj = rss_obj;
1846 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1848 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1850 /* RSS configuration */
1851 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1852 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1853 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1854 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1855 if (rss_obj->udp_rss_v4)
1856 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1857 if (rss_obj->udp_rss_v6)
1858 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1861 params.rss_result_mask = MULTI_MASK;
1863 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1867 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1868 params.rss_key[i] = random32();
1870 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1873 return bnx2x_config_rss(bp, ¶ms);
1876 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1878 struct bnx2x_func_state_params func_params = {NULL};
1880 /* Prepare parameters for function state transitions */
1881 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1883 func_params.f_obj = &bp->func_obj;
1884 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1886 func_params.params.hw_init.load_phase = load_code;
1888 return bnx2x_func_state_change(bp, &func_params);
1892 * Cleans the object that have internal lists without sending
1893 * ramrods. Should be run when interrutps are disabled.
1895 static void bnx2x_squeeze_objects(struct bnx2x *bp)
1898 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1899 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1900 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
1902 /***************** Cleanup MACs' object first *************************/
1904 /* Wait for completion of requested */
1905 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1906 /* Perform a dry cleanup */
1907 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1909 /* Clean ETH primary MAC */
1910 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1911 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
1914 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1916 /* Cleanup UC list */
1918 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1919 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1922 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1924 /***************** Now clean mcast object *****************************/
1925 rparam.mcast_obj = &bp->mcast_obj;
1926 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1928 /* Add a DEL command... */
1929 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1931 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1934 /* ...and wait until all pending commands are cleared */
1935 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1938 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1943 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1947 #ifndef BNX2X_STOP_ON_ERROR
1948 #define LOAD_ERROR_EXIT(bp, label) \
1950 (bp)->state = BNX2X_STATE_ERROR; \
1954 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1956 bp->cnic_loaded = false; \
1959 #else /*BNX2X_STOP_ON_ERROR*/
1960 #define LOAD_ERROR_EXIT(bp, label) \
1962 (bp)->state = BNX2X_STATE_ERROR; \
1966 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
1968 bp->cnic_loaded = false; \
1972 #endif /*BNX2X_STOP_ON_ERROR*/
1974 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1976 /* build FW version dword */
1977 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1978 (BCM_5710_FW_MINOR_VERSION << 8) +
1979 (BCM_5710_FW_REVISION_VERSION << 16) +
1980 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1982 /* read loaded FW from chip */
1983 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1985 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1987 if (loaded_fw != my_fw) {
1989 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1998 * bnx2x_bz_fp - zero content of the fastpath structure.
2000 * @bp: driver handle
2001 * @index: fastpath index to be zeroed
2003 * Makes sure the contents of the bp->fp[index].napi is kept
2006 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2008 struct bnx2x_fastpath *fp = &bp->fp[index];
2009 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2012 struct napi_struct orig_napi = fp->napi;
2013 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2014 /* bzero bnx2x_fastpath contents */
2015 if (bp->stats_init) {
2016 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2017 memset(fp, 0, sizeof(*fp));
2019 /* Keep Queue statistics */
2020 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2021 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2023 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2025 if (tmp_eth_q_stats)
2026 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
2027 sizeof(struct bnx2x_eth_q_stats));
2029 tmp_eth_q_stats_old =
2030 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2032 if (tmp_eth_q_stats_old)
2033 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
2034 sizeof(struct bnx2x_eth_q_stats_old));
2036 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
2037 memset(fp, 0, sizeof(*fp));
2039 if (tmp_eth_q_stats) {
2040 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2041 sizeof(struct bnx2x_eth_q_stats));
2042 kfree(tmp_eth_q_stats);
2045 if (tmp_eth_q_stats_old) {
2046 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
2047 sizeof(struct bnx2x_eth_q_stats_old));
2048 kfree(tmp_eth_q_stats_old);
2053 /* Restore the NAPI object as it has been already initialized */
2054 fp->napi = orig_napi;
2055 fp->tpa_info = orig_tpa_info;
2059 fp->max_cos = bp->max_cos;
2061 /* Special queues support only one CoS */
2064 /* Init txdata pointers */
2066 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2068 for_each_cos_in_tx_queue(fp, cos)
2069 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2070 BNX2X_NUM_ETH_QUEUES(bp) + index];
2073 * set the tpa flag for each queue. The tpa flag determines the queue
2074 * minimal size so it must be set prior to queue memory allocation
2076 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2077 (bp->flags & GRO_ENABLE_FLAG &&
2078 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2079 if (bp->flags & TPA_ENABLE_FLAG)
2080 fp->mode = TPA_MODE_LRO;
2081 else if (bp->flags & GRO_ENABLE_FLAG)
2082 fp->mode = TPA_MODE_GRO;
2084 /* We don't want TPA on an FCoE L2 ring */
2086 fp->disable_tpa = 1;
2089 int bnx2x_load_cnic(struct bnx2x *bp)
2091 int i, rc, port = BP_PORT(bp);
2093 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2095 mutex_init(&bp->cnic_mutex);
2097 rc = bnx2x_alloc_mem_cnic(bp);
2099 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2100 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2103 rc = bnx2x_alloc_fp_mem_cnic(bp);
2105 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2106 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2109 /* Update the number of queues with the cnic queues */
2110 rc = bnx2x_set_real_num_queues(bp, 1);
2112 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2113 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2116 /* Add all CNIC NAPI objects */
2117 bnx2x_add_all_napi_cnic(bp);
2118 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2119 bnx2x_napi_enable_cnic(bp);
2121 rc = bnx2x_init_hw_func_cnic(bp);
2123 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2125 bnx2x_nic_init_cnic(bp);
2127 /* Enable Timer scan */
2128 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2130 for_each_cnic_queue(bp, i) {
2131 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2133 BNX2X_ERR("Queue setup failed\n");
2134 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2138 /* Initialize Rx filter. */
2139 netif_addr_lock_bh(bp->dev);
2140 bnx2x_set_rx_mode(bp->dev);
2141 netif_addr_unlock_bh(bp->dev);
2143 /* re-read iscsi info */
2144 bnx2x_get_iscsi_info(bp);
2145 bnx2x_setup_cnic_irq_info(bp);
2146 bnx2x_setup_cnic_info(bp);
2147 bp->cnic_loaded = true;
2148 if (bp->state == BNX2X_STATE_OPEN)
2149 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2152 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2156 #ifndef BNX2X_STOP_ON_ERROR
2158 /* Disable Timer scan */
2159 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2162 bnx2x_napi_disable_cnic(bp);
2163 /* Update the number of queues without the cnic queues */
2164 rc = bnx2x_set_real_num_queues(bp, 0);
2166 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2168 BNX2X_ERR("CNIC-related load failed\n");
2169 bnx2x_free_fp_mem_cnic(bp);
2170 bnx2x_free_mem_cnic(bp);
2172 #endif /* ! BNX2X_STOP_ON_ERROR */
2176 /* must be called with rtnl_lock */
2177 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2179 int port = BP_PORT(bp);
2183 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2185 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2187 #ifdef BNX2X_STOP_ON_ERROR
2188 if (unlikely(bp->panic)) {
2189 BNX2X_ERR("Can't load NIC when there is panic\n");
2194 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2196 /* Set the initial link reported state to link down */
2197 bnx2x_acquire_phy_lock(bp);
2198 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2199 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2200 &bp->last_reported_link.link_report_flags);
2201 bnx2x_release_phy_lock(bp);
2203 /* must be called before memory allocation and HW init */
2204 bnx2x_ilt_set_info(bp);
2207 * Zero fastpath structures preserving invariants like napi, which are
2208 * allocated only once, fp index, max_cos, bp pointer.
2209 * Also set fp->disable_tpa and txdata_ptr.
2211 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2212 for_each_queue(bp, i)
2214 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2215 bp->num_cnic_queues) *
2216 sizeof(struct bnx2x_fp_txdata));
2218 bp->fcoe_init = false;
2220 /* Set the receive queues buffer size */
2221 bnx2x_set_rx_buf_size(bp);
2223 if (bnx2x_alloc_mem(bp))
2226 /* As long as bnx2x_alloc_mem() may possibly update
2227 * bp->num_queues, bnx2x_set_real_num_queues() should always
2228 * come after it. At this stage cnic queues are not counted.
2230 rc = bnx2x_set_real_num_queues(bp, 0);
2232 BNX2X_ERR("Unable to set real_num_queues\n");
2233 LOAD_ERROR_EXIT(bp, load_error0);
2236 /* configure multi cos mappings in kernel.
2237 * this configuration may be overriden by a multi class queue discipline
2238 * or by a dcbx negotiation result.
2240 bnx2x_setup_tc(bp->dev, bp->max_cos);
2242 /* Add all NAPI objects */
2243 bnx2x_add_all_napi(bp);
2244 DP(NETIF_MSG_IFUP, "napi added\n");
2245 bnx2x_napi_enable(bp);
2247 /* set pf load just before approaching the MCP */
2248 bnx2x_set_pf_load(bp);
2250 /* Send LOAD_REQUEST command to MCP
2251 * Returns the type of LOAD command:
2252 * if it is the first port to be initialized
2253 * common blocks should be initialized, otherwise - not
2255 if (!BP_NOMCP(bp)) {
2258 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2259 DRV_MSG_SEQ_NUMBER_MASK);
2260 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2262 /* Get current FW pulse sequence */
2263 bp->fw_drv_pulse_wr_seq =
2264 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2265 DRV_PULSE_SEQ_MASK);
2266 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2268 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2269 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2271 BNX2X_ERR("MCP response failure, aborting\n");
2273 LOAD_ERROR_EXIT(bp, load_error1);
2275 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2276 BNX2X_ERR("Driver load refused\n");
2277 rc = -EBUSY; /* other port in diagnostic mode */
2278 LOAD_ERROR_EXIT(bp, load_error1);
2280 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2281 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2282 /* abort nic load if version mismatch */
2283 if (!bnx2x_test_firmware_version(bp, true)) {
2285 LOAD_ERROR_EXIT(bp, load_error2);
2290 int path = BP_PATH(bp);
2292 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2293 path, load_count[path][0], load_count[path][1],
2294 load_count[path][2]);
2295 load_count[path][0]++;
2296 load_count[path][1 + port]++;
2297 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2298 path, load_count[path][0], load_count[path][1],
2299 load_count[path][2]);
2300 if (load_count[path][0] == 1)
2301 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2302 else if (load_count[path][1 + port] == 1)
2303 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2305 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2308 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2309 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2310 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2313 * We need the barrier to ensure the ordering between the
2314 * writing to bp->port.pmf here and reading it from the
2315 * bnx2x_periodic_task().
2321 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2323 /* Init Function state controlling object */
2324 bnx2x__init_func_obj(bp);
2327 rc = bnx2x_init_hw(bp, load_code);
2329 BNX2X_ERR("HW init failed, aborting\n");
2330 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2331 LOAD_ERROR_EXIT(bp, load_error2);
2334 /* Connect to IRQs */
2335 rc = bnx2x_setup_irqs(bp);
2337 BNX2X_ERR("IRQs setup failed\n");
2338 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2339 LOAD_ERROR_EXIT(bp, load_error2);
2342 /* Setup NIC internals and enable interrupts */
2343 bnx2x_nic_init(bp, load_code);
2345 /* Init per-function objects */
2346 bnx2x_init_bp_objs(bp);
2348 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2349 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2350 (bp->common.shmem2_base)) {
2351 if (SHMEM2_HAS(bp, dcc_support))
2352 SHMEM2_WR(bp, dcc_support,
2353 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2354 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2355 if (SHMEM2_HAS(bp, afex_driver_support))
2356 SHMEM2_WR(bp, afex_driver_support,
2357 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2360 /* Set AFEX default VLAN tag to an invalid value */
2361 bp->afex_def_vlan_tag = -1;
2363 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2364 rc = bnx2x_func_start(bp);
2366 BNX2X_ERR("Function start failed!\n");
2367 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2368 LOAD_ERROR_EXIT(bp, load_error3);
2371 /* Send LOAD_DONE command to MCP */
2372 if (!BP_NOMCP(bp)) {
2373 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2375 BNX2X_ERR("MCP response failure, aborting\n");
2377 LOAD_ERROR_EXIT(bp, load_error3);
2381 rc = bnx2x_setup_leading(bp);
2383 BNX2X_ERR("Setup leading failed!\n");
2384 LOAD_ERROR_EXIT(bp, load_error3);
2387 for_each_nondefault_eth_queue(bp, i) {
2388 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2390 BNX2X_ERR("Queue setup failed\n");
2391 LOAD_ERROR_EXIT(bp, load_error3);
2395 rc = bnx2x_init_rss_pf(bp);
2397 BNX2X_ERR("PF RSS init failed\n");
2398 LOAD_ERROR_EXIT(bp, load_error3);
2401 /* Now when Clients are configured we are ready to work */
2402 bp->state = BNX2X_STATE_OPEN;
2404 /* Configure a ucast MAC */
2405 rc = bnx2x_set_eth_mac(bp, true);
2407 BNX2X_ERR("Setting Ethernet MAC failed\n");
2408 LOAD_ERROR_EXIT(bp, load_error3);
2411 if (bp->pending_max) {
2412 bnx2x_update_max_mf_config(bp, bp->pending_max);
2413 bp->pending_max = 0;
2417 bnx2x_initial_phy_init(bp, load_mode);
2418 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2420 /* Start fast path */
2422 /* Initialize Rx filter. */
2423 netif_addr_lock_bh(bp->dev);
2424 bnx2x_set_rx_mode(bp->dev);
2425 netif_addr_unlock_bh(bp->dev);
2428 switch (load_mode) {
2430 /* Tx queue should be only reenabled */
2431 netif_tx_wake_all_queues(bp->dev);
2435 netif_tx_start_all_queues(bp->dev);
2436 smp_mb__after_clear_bit();
2440 case LOAD_LOOPBACK_EXT:
2441 bp->state = BNX2X_STATE_DIAG;
2449 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2451 bnx2x__link_status_update(bp);
2453 /* start the timer */
2454 mod_timer(&bp->timer, jiffies + bp->current_interval);
2456 if (CNIC_ENABLED(bp))
2457 bnx2x_load_cnic(bp);
2459 /* mark driver is loaded in shmem2 */
2460 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2462 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2463 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2464 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2465 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2468 /* Wait for all pending SP commands to complete */
2469 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2470 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2471 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2475 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2476 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2477 bnx2x_dcbx_init(bp, false);
2479 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2483 #ifndef BNX2X_STOP_ON_ERROR
2485 bnx2x_int_disable_sync(bp, 1);
2487 /* Clean queueable objects */
2488 bnx2x_squeeze_objects(bp);
2490 /* Free SKBs, SGEs, TPA pool and driver internals */
2491 bnx2x_free_skbs(bp);
2492 for_each_rx_queue(bp, i)
2493 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2498 if (!BP_NOMCP(bp)) {
2499 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2500 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2505 bnx2x_napi_disable(bp);
2506 /* clear pf_load status, as it was already set */
2507 bnx2x_clear_pf_load(bp);
2512 #endif /* ! BNX2X_STOP_ON_ERROR */
2515 /* must be called with rtnl_lock */
2516 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2519 bool global = false;
2521 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2523 /* mark driver is unloaded in shmem2 */
2524 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2526 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2527 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2528 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2531 if ((bp->state == BNX2X_STATE_CLOSED) ||
2532 (bp->state == BNX2X_STATE_ERROR)) {
2533 /* We can get here if the driver has been unloaded
2534 * during parity error recovery and is either waiting for a
2535 * leader to complete or for other functions to unload and
2536 * then ifdown has been issued. In this case we want to
2537 * unload and let other functions to complete a recovery
2540 bp->recovery_state = BNX2X_RECOVERY_DONE;
2542 bnx2x_release_leader_lock(bp);
2545 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2546 BNX2X_ERR("Can't unload in closed or error state\n");
2551 * It's important to set the bp->state to the value different from
2552 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2553 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2555 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2558 if (CNIC_LOADED(bp))
2559 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2562 bnx2x_tx_disable(bp);
2563 netdev_reset_tc(bp->dev);
2565 bp->rx_mode = BNX2X_RX_MODE_NONE;
2567 del_timer_sync(&bp->timer);
2569 /* Set ALWAYS_ALIVE bit in shmem */
2570 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2572 bnx2x_drv_pulse(bp);
2574 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2575 bnx2x_save_statistics(bp);
2577 /* Cleanup the chip if needed */
2578 if (unload_mode != UNLOAD_RECOVERY)
2579 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2581 /* Send the UNLOAD_REQUEST to the MCP */
2582 bnx2x_send_unload_req(bp, unload_mode);
2585 * Prevent transactions to host from the functions on the
2586 * engine that doesn't reset global blocks in case of global
2587 * attention once gloabl blocks are reset and gates are opened
2588 * (the engine which leader will perform the recovery
2591 if (!CHIP_IS_E1x(bp))
2592 bnx2x_pf_disable(bp);
2594 /* Disable HW interrupts, NAPI */
2595 bnx2x_netif_stop(bp, 1);
2596 /* Delete all NAPI objects */
2597 bnx2x_del_all_napi(bp);
2598 if (CNIC_LOADED(bp))
2599 bnx2x_del_all_napi_cnic(bp);
2603 /* Report UNLOAD_DONE to MCP */
2604 bnx2x_send_unload_done(bp, false);
2608 * At this stage no more interrupts will arrive so we may safly clean
2609 * the queueable objects here in case they failed to get cleaned so far.
2611 bnx2x_squeeze_objects(bp);
2613 /* There should be no more pending SP commands at this stage */
2618 /* Free SKBs, SGEs, TPA pool and driver internals */
2619 bnx2x_free_skbs(bp);
2620 if (CNIC_LOADED(bp))
2621 bnx2x_free_skbs_cnic(bp);
2622 for_each_rx_queue(bp, i)
2623 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2625 if (CNIC_LOADED(bp)) {
2626 bnx2x_free_fp_mem_cnic(bp);
2627 bnx2x_free_mem_cnic(bp);
2631 bp->state = BNX2X_STATE_CLOSED;
2632 bp->cnic_loaded = false;
2634 /* Check if there are pending parity attentions. If there are - set
2635 * RECOVERY_IN_PROGRESS.
2637 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2638 bnx2x_set_reset_in_progress(bp);
2640 /* Set RESET_IS_GLOBAL if needed */
2642 bnx2x_set_reset_global(bp);
2646 /* The last driver must disable a "close the gate" if there is no
2647 * parity attention or "process kill" pending.
2649 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2650 bnx2x_disable_close_the_gate(bp);
2652 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
/* bnx2x_set_power_state - move the adapter between PCI power states.
 * NOTE(review): this listing is elided (embedded line numbers jump); the
 * switch on 'state' and several return statements are not visible here.
 * Presumably state is PCI_D0 or PCI_D3hot - confirm against full source.
 */
2657 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2661 /* If there is no power capability, silently succeed */
2663 BNX2X_DEV_INFO("No power capability. Breaking.\n");
/* Read the current PM control/status word from PCI config space. */
2667 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
/* D0 path: clear the power-state bits and write back PME status. */
2671 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2672 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2673 PCI_PM_CTRL_PME_STATUS))(
2675 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2676 /* delay required during transition out of D3hot */
2681 /* If there are other clients above don't
2682 shut down the power */
2683 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2685 /* Don't shut down the power for emulation and FPGA */
2686 if (CHIP_REV_IS_SLOW(bp))
/* D3hot path: program the new state, optionally arming PME. */
2689 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2693 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2695 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2698 /* No more memory access after this point until
2699 * device is brought back to D0.
/* Any other pci_power_t value is unsupported - report it. */
2704 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2711 * net_device service functions
/* bnx2x_poll - NAPI poll callback for one fastpath ring.
 * Services Tx completions for every CoS, then Rx up to 'budget', and
 * re-enables interrupts via bnx2x_ack_sb() only once no work remains.
 * NOTE(review): elided listing - loop construct and final return are not
 * visible here.
 */
2713 int bnx2x_poll(struct napi_struct *napi, int budget)
2717 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2719 struct bnx2x *bp = fp->bp;
2722 #ifdef BNX2X_STOP_ON_ERROR
/* On a driver panic, bail out of NAPI immediately. */
2723 if (unlikely(bp->panic)) {
2724 napi_complete(napi);
/* Drain Tx completions on every class-of-service queue of this fp. */
2729 for_each_cos_in_tx_queue(fp, cos)
2730 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2731 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
2734 if (bnx2x_has_rx_work(fp)) {
2735 work_done += bnx2x_rx_int(fp, budget - work_done);
2737 /* must not complete if we consumed full budget */
2738 if (work_done >= budget)
2742 /* Fall out from the NAPI loop if needed */
2743 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2745 /* No need to update SB for FCoE L2 ring as long as
2746 * it's connected to the default SB and the SB
2747 * has been updated when NAPI was scheduled.
2749 if (IS_FCOE_FP(fp)) {
2750 napi_complete(napi);
2753 bnx2x_update_fpsb_idx(fp);
2754 /* bnx2x_has_rx_work() reads the status block,
2755 * thus we need to ensure that status block indices
2756 * have been actually read (bnx2x_update_fpsb_idx)
2757 * prior to this check (bnx2x_has_rx_work) so that
2758 * we won't write the "newer" value of the status block
2759 * to IGU (if there was a DMA right after
2760 * bnx2x_has_rx_work and if there is no rmb, the memory
2761 * reading (bnx2x_update_fpsb_idx) may be postponed
2762 * to right before bnx2x_ack_sb). In this case there
2763 * will never be another interrupt until there is
2764 * another update of the status block, while there
2765 * is still unhandled work.
/* Re-check after the SB snapshot; only then complete and ACK. */
2769 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2770 napi_complete(napi);
2771 /* Re-enable interrupts */
2772 DP(NETIF_MSG_RX_STATUS,
2773 "Update index to %d\n", fp->fp_hc_idx);
2774 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2775 le16_to_cpu(fp->fp_hc_idx),
2785 /* we split the first BD into headers and data BDs
2786 * to ease the pain of our fellow microcode engineers
2787 * we use one mapping for both BDs
/* Splits a TSO start BD at 'hlen' bytes: the start BD keeps the headers,
 * a new data BD takes the remaining (old_len - hlen) bytes at the same
 * DMA mapping offset by hlen.  *tx_bd is advanced to the new data BD.
 * NOTE(review): the final return of bd_prod is elided from this listing.
 */
2789 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2790 struct bnx2x_fp_txdata *txdata,
2791 struct sw_tx_bd *tx_buf,
2792 struct eth_tx_start_bd **tx_bd, u16 hlen,
2793 u16 bd_prod, int nbd)
2795 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2796 struct eth_tx_bd *d_tx_bd;
2798 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2800 /* first fix first BD */
2801 h_tx_bd->nbd = cpu_to_le16(nbd);
2802 h_tx_bd->nbytes = cpu_to_le16(hlen);
2804 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2805 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2807 /* now get a new data BD
2808 * (after the pbd) and fill it */
2809 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2810 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
/* Reuse the header BD's DMA mapping, advanced past the headers. */
2812 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2813 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2815 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2816 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2817 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2819 /* this marks the BD as one that has no individual mapping */
2820 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2822 DP(NETIF_MSG_TX_QUEUED,
2823 "TSO split data size is %d (%x:%x)\n",
2824 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
/* Caller continues filling frags after the new data BD. */
2827 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
/* Adjust a partial checksum by 'fix' bytes relative to the transport
 * header and return it byte-swapped for the HW.  NOTE(review): the
 * if (fix > 0) / else selection between the two branches is elided from
 * this listing - only the two branch bodies are visible.
 */
2832 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
/* fix > 0: remove the checksum of the bytes preceding t_header. */
2835 csum = (u16) ~csum_fold(csum_sub(csum,
2836 csum_partial(t_header - fix, fix, 0)));
/* fix < 0: add back the checksum of the -fix bytes at t_header. */
2839 csum = (u16) ~csum_fold(csum_add(csum,
2840 csum_partial(t_header, -fix, 0)));
2842 return swab16(csum);
/* Classify an outgoing skb into XMIT_* flag bits (checksum family,
 * TCP vs UDP, GSO v4/v6) used by the transmit path.  NOTE(review):
 * the rc initialization, else branches and final return are elided.
 */
2845 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
/* No flags needed when the stack did not request checksum offload. */
2849 if (skb->ip_summed != CHECKSUM_PARTIAL)
2853 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2855 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2856 rc |= XMIT_CSUM_TCP;
2860 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2861 rc |= XMIT_CSUM_TCP;
/* GSO implies TCP checksum offload for the matching IP version. */
2865 if (skb_is_gso_v6(skb))
2866 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2867 else if (skb_is_gso(skb))
2868 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2873 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2874 /* check if packet requires linearization (packet is too fragmented)
2875 no need to check fragmentation if page size > 8K (there will be no
2876 violation to FW restrictions) */
/* Returns non-zero ("to_copy") when the skb must be linearized because
 * some window of MAX_FETCH_BD-3 consecutive BDs would carry less than
 * one LSO MSS - a FW restriction.  NOTE(review): elided listing - the
 * declarations of to_copy/hlen/wnd_* and the final return are missing.
 */
2877 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2882 int first_bd_sz = 0;
2884 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2885 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2887 if (xmit_type & XMIT_GSO) {
2888 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2889 /* Check if LSO packet needs to be copied:
2890 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2891 int wnd_size = MAX_FETCH_BD - 3;
2892 /* Number of windows to check */
2893 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2898 /* Headers length */
2899 hlen = (int)(skb_transport_header(skb) - skb->data) +
2902 /* Amount of data (w/o headers) on linear part of SKB*/
2903 first_bd_sz = skb_headlen(skb) - hlen;
2905 wnd_sum = first_bd_sz;
2907 /* Calculate the first sum - it's special */
2908 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2910 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2912 /* If there was data on linear skb data - check it */
2913 if (first_bd_sz > 0) {
2914 if (unlikely(wnd_sum < lso_mss)) {
2919 wnd_sum -= first_bd_sz;
2922 /* Others are easier: run through the frag list and
2923 check all windows */
/* Sliding window: add the frag entering, subtract the one leaving. */
2924 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2926 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2928 if (unlikely(wnd_sum < lso_mss)) {
2933 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2936 /* in non-LSO too fragmented packet should always
2943 if (unlikely(to_copy))
2944 DP(NETIF_MSG_TX_QUEUED,
2945 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2946 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2947 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
/* Encode the GSO MSS (and, for IPv6 with an extension header, the
 * extension-header flag) into the E2 parsing-BD data word.
 */
2953 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2956 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2957 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2958 ETH_TX_PARSE_BD_E2_LSO_MSS;
/* IPv6 GSO with a chained IPv6 header needs the ext-hdr flag set. */
2959 if ((xmit_type & XMIT_GSO_V6) &&
2960 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2961 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2965 * bnx2x_set_pbd_gso - update PBD in GSO case.
/* Fills the E1x parsing BD for a TSO packet: MSS, TCP sequence/flags
 * and the pseudo-header checksum for the relevant IP version.
 * NOTE(review): the else keyword between the v4 and v6 branches and
 * the daddr argument line are elided from this listing.
 */
2969 * @xmit_type: xmit flags
2971 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2972 struct eth_tx_parse_bd_e1x *pbd,
2975 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2976 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2977 pbd->tcp_flags = pbd_tcp_flags(skb);
2979 if (xmit_type & XMIT_GSO_V4) {
2980 pbd->ip_id = swab16(ip_hdr(skb)->id);
/* IPv4: pseudo-header checksum with zero length, per TSO contract. */
2981 pbd->tcp_pseudo_csum =
2982 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2984 0, IPPROTO_TCP, 0));
/* IPv6 branch: same pseudo checksum over the v6 addresses. */
2987 pbd->tcp_pseudo_csum =
2988 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2989 &ipv6_hdr(skb)->daddr,
2990 0, IPPROTO_TCP, 0));
2992 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2996 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2998 * @bp: driver handle
3000 * @parsing_data: data to be updated
3001 * @xmit_type: xmit flags
/* E2 chips: record the transport-header offset (in words) and, for TCP,
 * the TCP header length in the parsing data; returns total header length
 * in bytes (L2+L3+L4 for TCP, L2+L3+UDP header otherwise).
 */
3005 static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3006 u32 *parsing_data, u32 xmit_type)
/* Offset of the transport header from the MAC header, in 16-bit words. */
3009 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3010 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3011 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
3013 if (xmit_type & XMIT_CSUM_TCP) {
3014 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3015 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3016 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3018 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3020 /* We support checksum offload for TCP and UDP only.
3021 * No need to pass the UDP header length - it's a constant.
3023 return skb_transport_header(skb) +
3024 sizeof(struct udphdr) - skb->data;
/* Set the checksum-offload flag bits on the start BD: always L4 csum,
 * plus IP csum (v4) or the IPv6 marker, and the UDP flag when the L4
 * protocol is not TCP.  NOTE(review): the else between the v4/v6
 * branches is elided from this listing.
 */
3027 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3028 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3030 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3032 if (xmit_type & XMIT_CSUM_V4)
3033 tx_start_bd->bd_flags.as_bitfield |=
3034 ETH_TX_BD_FLAGS_IP_CSUM;
3036 tx_start_bd->bd_flags.as_bitfield |=
3037 ETH_TX_BD_FLAGS_IPV6;
/* Not TCP => must be UDP (only TCP/UDP csum offload is supported). */
3039 if (!(xmit_type & XMIT_CSUM_TCP))
3040 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3044 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3046 * @bp: driver handle
3048 * @pbd: parse BD to be updated
3049 * @xmit_type: xmit flags
/* E1x chips: fill header lengths (in 16-bit words) into the parsing BD
 * and set tcp_pseudo_csum, applying the HW checksum-fix workaround for
 * non-TCP packets.  Returns the total header length in words.
 * NOTE(review): elided listing - else branches and final return missing.
 */
3051 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3052 struct eth_tx_parse_bd_e1x *pbd,
/* L2 header length in words (network header offset / 2). */
3055 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3057 /* for now NS flag is not used in Linux */
3059 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3060 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3062 pbd->ip_hlen_w = (skb_transport_header(skb) -
3063 skb_network_header(skb)) >> 1;
3065 hlen += pbd->ip_hlen_w;
3067 /* We support checksum offload for TCP and UDP only */
3068 if (xmit_type & XMIT_CSUM_TCP)
3069 hlen += tcp_hdrlen(skb) / 2;
3071 hlen += sizeof(struct udphdr) / 2;
3073 pbd->total_hlen_w = cpu_to_le16(hlen);
3076 if (xmit_type & XMIT_CSUM_TCP) {
3077 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
/* Non-TCP branch: HW bug workaround rebases the checksum. */
3080 s8 fix = SKB_CS_OFF(skb); /* signed! */
3082 DP(NETIF_MSG_TX_QUEUED,
3083 "hlen %d fix %d csum before fix %x\n",
3084 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3086 /* HW bug: fixup the CSUM */
3087 pbd->tcp_pseudo_csum =
3088 bnx2x_csum_fix(skb_transport_header(skb),
3091 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3092 pbd->tcp_pseudo_csum);
3098 /* called with netif_tx_lock
3099 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3100 * netif_wake_queue()
/* Main .ndo_start_xmit: maps the skb for DMA, builds the start/parse/
 * data BD chain (splitting the first BD for TSO), rings the doorbell
 * and stops the queue when the ring is nearly full.
 * NOTE(review): this listing is elided - closing braces, some argument
 * lines and several locals' declarations are not visible here.
 */
3102 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3104 struct bnx2x *bp = netdev_priv(dev);
3106 struct netdev_queue *txq;
3107 struct bnx2x_fp_txdata *txdata;
3108 struct sw_tx_bd *tx_buf;
3109 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3110 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3111 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3112 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3113 u32 pbd_e2_parsing_data = 0;
3114 u16 pkt_prod, bd_prod;
3117 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3120 __le16 pkt_size = 0;
3122 u8 mac_type = UNICAST_ADDRESS;
3124 #ifdef BNX2X_STOP_ON_ERROR
3125 if (unlikely(bp->panic))
3126 return NETDEV_TX_BUSY;
/* Resolve the tx queue / txdata selected by the stack's queue mapping. */
3129 txq_index = skb_get_queue_mapping(skb);
3130 txq = netdev_get_tx_queue(dev, txq_index);
3132 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3134 txdata = &bp->bnx2x_txq[txq_index];
3136 /* enable this debug print to view the transmission queue being used
3137 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3138 txq_index, fp_index, txdata_index); */
3140 /* enable this debug print to view the tranmission details
3141 DP(NETIF_MSG_TX_QUEUED,
3142 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3143 txdata->cid, fp_index, txdata_index, txdata, fp); */
/* Ring-full check: worst-case BD count for this skb must fit. */
3145 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3146 skb_shinfo(skb)->nr_frags +
3148 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3149 /* Handle special storage cases separately */
3150 if (txdata->tx_ring_size == 0) {
3151 struct bnx2x_eth_q_stats *q_stats =
3152 bnx2x_fp_qstats(bp, txdata->parent_fp);
3153 q_stats->driver_filtered_tx_pkt++;
3155 return NETDEV_TX_OK;
3157 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3158 netif_tx_stop_queue(txq);
3159 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3161 return NETDEV_TX_BUSY;
3164 DP(NETIF_MSG_TX_QUEUED,
3165 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
3166 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3167 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3169 eth = (struct ethhdr *)skb->data;
3171 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3172 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3173 if (is_broadcast_ether_addr(eth->h_dest))
3174 mac_type = BROADCAST_ADDRESS;
3176 mac_type = MULTICAST_ADDRESS;
3179 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3180 /* First, check if we need to linearize the skb (due to FW
3181 restrictions). No need to check fragmentation if page size > 8K
3182 (there will be no violation to FW restrictions) */
3183 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3184 /* Statistics of linearization */
3186 if (skb_linearize(skb) != 0) {
3187 DP(NETIF_MSG_TX_QUEUED,
3188 "SKB linearization failed - silently dropping this SKB\n");
3189 dev_kfree_skb_any(skb);
3190 return NETDEV_TX_OK;
3194 /* Map skb linear data for DMA */
3195 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3196 skb_headlen(skb), DMA_TO_DEVICE);
3197 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3198 DP(NETIF_MSG_TX_QUEUED,
3199 "SKB mapping failed - silently dropping this SKB\n");
3200 dev_kfree_skb_any(skb);
3201 return NETDEV_TX_OK;
3204 Please read carefully. First we use one BD which we mark as start,
3205 then we have a parsing info BD (used for TSO or xsum),
3206 and only then we have the rest of the TSO BDs.
3207 (don't forget to mark the last one as last,
3208 and to unmap only AFTER you write to the BD ...)
3209 And above all, all pdb sizes are in words - NOT DWORDS!
3212 /* get current pkt produced now - advance it just before sending packet
3213 * since mapping of pages may fail and cause packet to be dropped
3215 pkt_prod = txdata->tx_pkt_prod;
3216 bd_prod = TX_BD(txdata->tx_bd_prod);
3218 /* get a tx_buf and first BD
3219 * tx_start_bd may be changed during SPLIT,
3220 * but first_bd will always stay first
3222 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3223 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3224 first_bd = tx_start_bd;
3226 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3227 SET_FLAG(tx_start_bd->general_data,
3228 ETH_TX_START_BD_PARSE_NBDS,
3232 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
3234 /* remember the first BD of the packet */
3235 tx_buf->first_bd = txdata->tx_bd_prod;
3239 DP(NETIF_MSG_TX_QUEUED,
3240 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3241 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
/* VLAN tag goes into the start BD; otherwise pkt_prod is stored there
 * (used by FW; no VLAN insertion in that case).
 */
3243 if (vlan_tx_tag_present(skb)) {
3244 tx_start_bd->vlan_or_ethertype =
3245 cpu_to_le16(vlan_tx_tag_get(skb));
3246 tx_start_bd->bd_flags.as_bitfield |=
3247 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3249 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3251 /* turn on parsing and get a BD */
3252 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3254 if (xmit_type & XMIT_CSUM)
3255 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
/* E2+ chips use the e2 parsing BD; E1x chips the e1x variant below. */
3257 if (!CHIP_IS_E1x(bp)) {
3258 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3259 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3260 /* Set PBD in checksum offload case */
3261 if (xmit_type & XMIT_CSUM)
3262 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3263 &pbd_e2_parsing_data,
3267 * fill in the MAC addresses in the PBD - for local
3270 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3271 &pbd_e2->src_mac_addr_mid,
3272 &pbd_e2->src_mac_addr_lo,
3274 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3275 &pbd_e2->dst_mac_addr_mid,
3276 &pbd_e2->dst_mac_addr_lo,
3280 SET_FLAG(pbd_e2_parsing_data,
3281 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3283 u16 global_data = 0;
3284 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3285 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3286 /* Set PBD in checksum offload case */
3287 if (xmit_type & XMIT_CSUM)
3288 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3290 SET_FLAG(global_data,
3291 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3292 pbd_e1x->global_data |= cpu_to_le16(global_data);
3295 /* Setup the data pointer of the first BD of the packet */
3296 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3297 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3298 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3299 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3300 pkt_size = tx_start_bd->nbytes;
3302 DP(NETIF_MSG_TX_QUEUED,
3303 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3304 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3305 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3306 tx_start_bd->bd_flags.as_bitfield,
3307 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
/* TSO: mark SW LSO and split the first BD if it holds payload too. */
3309 if (xmit_type & XMIT_GSO) {
3311 DP(NETIF_MSG_TX_QUEUED,
3312 "TSO packet len %d hlen %d total len %d tso size %d\n",
3313 skb->len, hlen, skb_headlen(skb),
3314 skb_shinfo(skb)->gso_size);
3316 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3318 if (unlikely(skb_headlen(skb) > hlen))
3319 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3322 if (!CHIP_IS_E1x(bp))
3323 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3326 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3329 /* Set the PBD's parsing_data field if not zero
3330 * (for the chips newer than 57711).
3332 if (pbd_e2_parsing_data)
3333 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3335 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3337 /* Handle fragmented skb */
3338 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3339 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3341 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3342 skb_frag_size(frag), DMA_TO_DEVICE);
3343 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3344 unsigned int pkts_compl = 0, bytes_compl = 0;
3346 DP(NETIF_MSG_TX_QUEUED,
3347 "Unable to map page - dropping packet...\n");
3349 /* we need unmap all buffers already mapped
3351 * first_bd->nbd need to be properly updated
3352 * before call to bnx2x_free_tx_pkt
3354 first_bd->nbd = cpu_to_le16(nbd);
3355 bnx2x_free_tx_pkt(bp, txdata,
3356 TX_BD(txdata->tx_pkt_prod),
3357 &pkts_compl, &bytes_compl);
3358 return NETDEV_TX_OK;
3361 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3362 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3363 if (total_pkt_bd == NULL)
3364 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3366 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3367 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3368 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3369 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3372 DP(NETIF_MSG_TX_QUEUED,
3373 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3374 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3375 le16_to_cpu(tx_data_bd->nbytes));
3378 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3380 /* update with actual num BDs */
3381 first_bd->nbd = cpu_to_le16(nbd);
3383 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3385 /* now send a tx doorbell, counting the next BD
3386 * if the packet contains or ends with it
3388 if (TX_BD_POFF(bd_prod) < nbd)
3391 /* total_pkt_bytes should be set on the first data BD if
3392 * it's not an LSO packet and there is more than one
3393 * data BD. In this case pkt_size is limited by an MTU value.
3394 * However we prefer to set it for an LSO packet (while we don't
3395 * have to) in order to save some CPU cycles in a none-LSO
3396 * case, when we much more care about them.
3398 if (total_pkt_bd != NULL)
3399 total_pkt_bd->total_pkt_bytes = pkt_size;
3402 DP(NETIF_MSG_TX_QUEUED,
3403 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3404 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3405 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3406 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3407 le16_to_cpu(pbd_e1x->total_hlen_w));
3409 DP(NETIF_MSG_TX_QUEUED,
3410 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3411 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3412 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3413 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3414 pbd_e2->parsing_data);
3415 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3417 netdev_tx_sent_queue(txq, skb->len);
3419 skb_tx_timestamp(skb);
3421 txdata->tx_pkt_prod++;
3423 * Make sure that the BD data is updated before updating the producer
3424 * since FW might read the BD right after the producer is updated.
3425 * This is only applicable for weak-ordered memory model archs such
3426 * as IA-64. The following barrier is also mandatory since FW will
3427 * assumes packets must have BDs.
/* Ring the doorbell: FW consumes BDs once the producer advances. */
3431 txdata->tx_db.data.prod += nbd;
3434 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3438 txdata->tx_bd_prod += nbd;
/* Stop the queue if another max-size packet would not fit; re-check
 * after the barrier and wake, to close the race with bnx2x_tx_int().
 */
3440 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3441 netif_tx_stop_queue(txq);
3443 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3444 * ordering of set_bit() in netif_tx_stop_queue() and read of
3448 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3449 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3450 netif_tx_wake_queue(txq);
3454 return NETDEV_TX_OK;
3458 * bnx2x_setup_tc - routine to configure net_device for multi tc
3460 * @netdev: net device to configure
3461 * @tc: number of traffic classes to enable
3463 * callback connected to the ndo_setup_tc function pointer
/* Validates num_tc against bp->max_cos, declares it to the stack, then
 * maps priorities->tc and tc->tx-queue ranges.  NOTE(review): elided
 * listing - return statements and some braces are not visible.
 */
3465 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3467 int cos, prio, count, offset;
3468 struct bnx2x *bp = netdev_priv(dev);
3470 /* setup tc must be called under rtnl lock */
3473 /* no traffic classes requested. aborting */
3475 netdev_reset_tc(dev);
3479 /* requested to support too many traffic classes */
3480 if (num_tc > bp->max_cos) {
3481 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3482 num_tc, bp->max_cos);
3486 /* declare amount of supported traffic classes */
3487 if (netdev_set_num_tc(dev, num_tc)) {
3488 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3492 /* configure priority to traffic class mapping */
3493 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3494 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3495 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3496 "mapping priority %d to tc %d\n",
3497 prio, bp->prio_to_cos[prio]);
3501 /* Use this configuration to diffrentiate tc0 from other COSes
3502 This can be used for ets or pfc, and save the effort of setting
3503 up a multio class queue disc or negotiating DCBX with a switch
3504 netdev_set_prio_tc_map(dev, 0, 0);
3505 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3506 for (prio = 1; prio < 16; prio++) {
3507 netdev_set_prio_tc_map(dev, prio, 1);
3508 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3511 /* configure traffic class to transmission queue mapping */
/* Each CoS gets a contiguous block of tx queues at its own offset. */
3512 for (cos = 0; cos < bp->max_cos; cos++) {
3513 count = BNX2X_NUM_ETH_QUEUES(bp);
3514 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3515 netdev_set_tc_queue(dev, cos, count, offset);
3516 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3517 "mapping tc %d to offset %d count %d\n",
3518 cos, offset, count);
3524 /* called with rtnl_lock */
/* .ndo_set_mac_address: validate the new address, tear down the old
 * MAC filter while running, install the new dev_addr, then re-add the
 * filter.  NOTE(review): returns on error paths are elided here.
 */
3525 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3527 struct sockaddr *addr = p;
3528 struct bnx2x *bp = netdev_priv(dev);
3531 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3532 BNX2X_ERR("Requested MAC address is not valid\n");
/* Storage-only functions may only carry the zero MAC in MF-SD mode. */
3536 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3537 !is_zero_ether_addr(addr->sa_data)) {
3538 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3542 if (netif_running(dev)) {
3543 rc = bnx2x_set_eth_mac(bp, false);
3548 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3549 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3551 if (netif_running(dev))
3552 rc = bnx2x_set_eth_mac(bp, true);
/* Free all DMA and host memory of one fastpath: status block, rx
 * rings (buf/desc/comp/SGE) and per-CoS tx rings.  Mirror image of
 * bnx2x_alloc_fp_mem_at().  NOTE(review): elided listing - some braces
 * and the else keywords between branches are not visible.
 */
3557 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3559 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3560 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
/* FCoE uses the default SB, so there is nothing PCI-allocated to free. */
3565 if (IS_FCOE_IDX(fp_index)) {
3566 memset(sb, 0, sizeof(union host_hc_status_block));
3567 fp->status_blk_mapping = 0;
3570 if (!CHIP_IS_E1x(bp))
3571 BNX2X_PCI_FREE(sb->e2_sb,
3572 bnx2x_fp(bp, fp_index,
3573 status_blk_mapping),
3574 sizeof(struct host_hc_status_block_e2));
3576 BNX2X_PCI_FREE(sb->e1x_sb,
3577 bnx2x_fp(bp, fp_index,
3578 status_blk_mapping),
3579 sizeof(struct host_hc_status_block_e1x));
3583 if (!skip_rx_queue(bp, fp_index)) {
3584 bnx2x_free_rx_bds(fp);
3586 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3587 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3588 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3589 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3590 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3592 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3593 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3594 sizeof(struct eth_fast_path_rx_cqe) *
/* SGE pages back TPA aggregation for this queue. */
3598 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3599 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3600 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3601 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3605 if (!skip_tx_queue(bp, fp_index)) {
3606 /* fastpath tx rings: tx_buf tx_desc */
3607 for_each_cos_in_tx_queue(fp, cos) {
3608 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3610 DP(NETIF_MSG_IFDOWN,
3611 "freeing tx memory of fp %d cos %d cid %d\n",
3612 fp_index, cos, txdata->cid);
3614 BNX2X_FREE(txdata->tx_buf_ring);
3615 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3616 txdata->tx_desc_mapping,
3617 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3620 /* end of fastpath */
/* Free fastpath memory for every CNIC (iSCSI/FCoE) queue. */
3623 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3626 for_each_cnic_queue(bp, i)
3627 bnx2x_free_fp_mem_at(bp, i);
/* Free fastpath memory for every regular Ethernet queue. */
3630 void bnx2x_free_fp_mem(struct bnx2x *bp)
3633 for_each_eth_queue(bp, i)
3634 bnx2x_free_fp_mem_at(bp, i);
/* Cache pointers into the status block's index arrays so the hot path
 * does not have to re-derive them; layout differs between E1x and E2.
 */
3637 static void set_sb_shortcuts(struct bnx2x *bp, int index)
3639 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3640 if (!CHIP_IS_E1x(bp)) {
3641 bnx2x_fp(bp, index, sb_index_values) =
3642 (__le16 *)status_blk.e2_sb->sb.index_values;
3643 bnx2x_fp(bp, index, sb_running_index) =
3644 (__le16 *)status_blk.e2_sb->sb.running_index;
/* E1x branch - same shortcuts from the e1x-format status block. */
3646 bnx2x_fp(bp, index, sb_index_values) =
3647 (__le16 *)status_blk.e1x_sb->sb.index_values;
3648 bnx2x_fp(bp, index, sb_running_index) =
3649 (__le16 *)status_blk.e1x_sb->sb.running_index;
3653 /* Returns the number of actually allocated BDs */
/* Populate up to rx_ring_size rx BDs with fresh buffers; allocation
 * failures are counted (failure_cnt) rather than aborting, and are
 * folded into the queue's rx_skb_alloc_failed statistic at the end.
 */
3654 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3657 struct bnx2x *bp = fp->bp;
3658 u16 ring_prod, cqe_ring_prod;
3659 int i, failure_cnt = 0;
3661 fp->rx_comp_cons = 0;
3662 cqe_ring_prod = ring_prod = 0;
3664 /* This routine is called only during fo init so
3665 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3667 for (i = 0; i < rx_ring_size; i++) {
3668 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3672 ring_prod = NEXT_RX_IDX(ring_prod);
3673 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3674 WARN_ON(ring_prod <= (i - failure_cnt));
3678 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3679 i - failure_cnt, fp->index);
3681 fp->rx_bd_prod = ring_prod;
3682 /* Limit the CQE producer by the CQE ring size */
3683 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3685 fp->rx_pkt = fp->rx_calls = 0;
3687 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
/* Number of BDs that actually got a buffer. */
3689 return i - failure_cnt;
/* Chain the RCQ pages: the last CQE of each page is rewritten as a
 * "next page" descriptor pointing at the following page (wrapping to
 * the first page via the modulo).
 */
3692 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3696 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3697 struct eth_rx_cqe_next_page *nextpg;
3699 nextpg = (struct eth_rx_cqe_next_page *)
3700 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3702 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3703 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3705 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3706 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
/* Allocate all memory for one fastpath: derive the rx ring size,
 * allocate the status block (except for FCoE, which shares the default
 * SB), per-CoS tx rings, rx rings/SGE pages, and finally fill the rx
 * BDs.  On insufficient rx buffers the queue is torn down again.
 * NOTE(review): elided listing - BNX2X_ALLOC/BNX2X_PCI_ALLOC presumably
 * jump to an error label on failure (not visible here); returns elided.
 */
3710 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3712 union host_hc_status_block *sb;
3713 struct bnx2x_fastpath *fp = &bp->fp[index];
3716 int rx_ring_size = 0;
/* Storage-only functions need just the FW-required minimum of buffers. */
3718 if (!bp->rx_ring_size &&
3719 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3720 rx_ring_size = MIN_RX_SIZE_NONTPA;
3721 bp->rx_ring_size = rx_ring_size;
3722 } else if (!bp->rx_ring_size) {
3723 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3725 if (CHIP_IS_E3(bp)) {
3726 u32 cfg = SHMEM_RD(bp,
3727 dev_info.port_hw_config[BP_PORT(bp)].
3730 /* Decrease ring size for 1G functions */
3731 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3732 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3736 /* allocate at least number of buffers required by FW */
3737 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3738 MIN_RX_SIZE_TPA, rx_ring_size);
3740 bp->rx_ring_size = rx_ring_size;
3741 } else /* if rx_ring_size specified - use it */
3742 rx_ring_size = bp->rx_ring_size;
3745 sb = &bnx2x_fp(bp, index, status_blk);
3747 if (!IS_FCOE_IDX(index)) {
3749 if (!CHIP_IS_E1x(bp))
3750 BNX2X_PCI_ALLOC(sb->e2_sb,
3751 &bnx2x_fp(bp, index, status_blk_mapping),
3752 sizeof(struct host_hc_status_block_e2));
3754 BNX2X_PCI_ALLOC(sb->e1x_sb,
3755 &bnx2x_fp(bp, index, status_blk_mapping),
3756 sizeof(struct host_hc_status_block_e1x));
3759 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3760 * set shortcuts for it.
3762 if (!IS_FCOE_IDX(index))
3763 set_sb_shortcuts(bp, index);
3766 if (!skip_tx_queue(bp, index)) {
3767 /* fastpath tx rings: tx_buf tx_desc */
3768 for_each_cos_in_tx_queue(fp, cos) {
3769 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
3772 "allocating tx memory of fp %d cos %d\n",
3775 BNX2X_ALLOC(txdata->tx_buf_ring,
3776 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3777 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3778 &txdata->tx_desc_mapping,
3779 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3784 if (!skip_rx_queue(bp, index)) {
3785 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3786 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3787 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3788 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3789 &bnx2x_fp(bp, index, rx_desc_mapping),
3790 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3792 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3793 &bnx2x_fp(bp, index, rx_comp_mapping),
3794 sizeof(struct eth_fast_path_rx_cqe) *
/* SGE pages for TPA aggregation. */
3798 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3799 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3800 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3801 &bnx2x_fp(bp, index, rx_sge_mapping),
3802 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3804 bnx2x_set_next_page_rx_bd(fp);
3807 bnx2x_set_next_page_rx_cq(fp);
3810 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3811 if (ring_size < rx_ring_size)
3817 /* handles low memory cases */
3819 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3821 /* FW will drop all packets if queue is not big enough,
3822 * In these cases we disable the queue
3823 * Min size is different for OOO, TPA and non-TPA queues
3825 if (ring_size < (fp->disable_tpa ?
3826 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3827 /* release memory allocated for this queue */
3828 bnx2x_free_fp_mem_at(bp, index);
/* Allocate fastpath memory for the CNIC (FCoE) L2 queue.
 * NOTE(review): the remainder of this function is elided in this
 * listing; only the allocation attempt is visible. */
3834 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3838 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3839 /* we will fail load process instead of mark
/* Allocate fastpath memory for all ethernet queues.  The leading queue
 * (index 0) is mandatory; failures on the remaining RSS queues are
 * handled by shrinking the queue count rather than failing the load.
 * NOTE(review): some lines are elided in this listing. */
3847 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3851 /* 1. Allocate FP for leading - fatal if error
3852 * 2. Allocate RSS - fix number of queues if error
3856 if (bnx2x_alloc_fp_mem_at(bp, 0))
3860 for_each_nondefault_eth_queue(bp, i)
3861 if (bnx2x_alloc_fp_mem_at(bp, i))
3864 /* handle memory failures */
3865 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
/* delta = number of eth queues that failed to allocate. */
3866 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3869 if (CNIC_SUPPORT(bp))
3870 /* move non eth FPs next to last eth FP
3871 * must be done in that order
3872 * FCOE_IDX < FWD_IDX < OOO_IDX
3875 /* move FCoE fp even NO_FCOE_FLAG is on */
3876 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
/* Shrink the advertised queue counts to match what was allocated. */
3877 bp->num_ethernet_queues -= delta;
3878 bp->num_queues = bp->num_ethernet_queues +
3879 bp->num_cnic_queues;
3880 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3881 bp->num_queues + delta, bp->num_queues);
3887 void bnx2x_free_mem_bp(struct bnx2x *bp)
3889 kfree(bp->fp->tpa_info);
3892 kfree(bp->fp_stats);
3893 kfree(bp->bnx2x_txq);
3894 kfree(bp->msix_table);
/* Allocate the driver-private arrays sized from the chip capabilities:
 * fastpath array (+ per-fp TPA aggregation info), slowpath objects,
 * per-fp stats, TX queue array, MSI-X table and ILT.  On any failure the
 * elided error path frees everything via bnx2x_free_mem_bp().
 * NOTE(review): some lines (error gotos, struct member assignments) are
 * elided in this listing. */
3898 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3900 struct bnx2x_fastpath *fp;
3901 struct msix_entry *tbl;
3902 struct bnx2x_ilt *ilt;
3903 int msix_table_size = 0;
3904 int fp_array_size, txq_array_size;
3908 * The biggest MSI-X table we might need is as a maximum number of fast
3909 * path IGU SBs plus default SB (for PF).
3911 msix_table_size = bp->igu_sb_cnt + 1;
3913 /* fp array: RSS plus CNIC related L2 queues */
3914 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3915 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3917 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
/* Each fastpath gets its own TPA aggregation-info array. */
3920 for (i = 0; i < fp_array_size; i++) {
3922 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3923 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3924 if (!(fp[i].tpa_info))
3930 /* allocate sp objs */
3931 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3936 /* allocate fp_stats */
3937 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3942 /* Allocate memory for the transmission queues array */
3944 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3945 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3947 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3953 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3956 bp->msix_table = tbl;
3959 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
/* Common error path: release everything allocated so far. */
3966 bnx2x_free_mem_bp(bp);
/* Unload and reload the NIC so new settings take effect — a no-op when
 * the interface is down.  Called with rtnl held (presumably, given its
 * netdev-ops callers — confirm). */
3971 int bnx2x_reload_if_running(struct net_device *dev)
3973 struct bnx2x *bp = netdev_priv(dev);
3975 if (unlikely(!netif_running(dev)))
3978 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
3979 return bnx2x_nic_load(bp, LOAD_NORMAL);
/* Return the currently active external PHY index (EXT_PHY1/EXT_PHY2) on
 * dual-PHY boards.  When the link is up the active PHY is inferred from
 * the link status; otherwise it falls back to the configured PHY
 * selection priority.  NOTE(review): the single-PHY early return and
 * final return are elided in this listing. */
3982 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3984 u32 sel_phy_idx = 0;
3985 if (bp->link_params.num_phys <= 1)
3988 if (bp->link_vars.link_up) {
3989 sel_phy_idx = EXT_PHY1;
3990 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3991 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3992 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3993 sel_phy_idx = EXT_PHY2;
3996 switch (bnx2x_phy_selection(&bp->link_params)) {
3997 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3998 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3999 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4000 sel_phy_idx = EXT_PHY1;
4002 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4003 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4004 sel_phy_idx = EXT_PHY2;
/* Map the active PHY index to its link-configuration index, undoing the
 * PHY-swap indirection when swapping is enabled in hardware config. */
4012 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4014 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4016 * The selected actived PHY is always after swapping (in case PHY
4017 * swapping is enabled). So when swapping is enabled, we need to reverse
4021 if (bp->link_params.multi_phy_config &
4022 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4023 if (sel_phy_idx == EXT_PHY1)
4024 sel_phy_idx = EXT_PHY2;
4025 else if (sel_phy_idx == EXT_PHY2)
4026 sel_phy_idx = EXT_PHY1;
4028 return LINK_CONFIG_IDX(sel_phy_idx);
#ifdef NETDEV_FCOE_WWNN
/* ndo_fcoe_get_wwn callback: report the FCoE World Wide Node/Port Name
 * (built from the hi/lo halves kept in the CNIC ethernet device info).
 * NOTE(review): return statements are elided in this listing. */
4032 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4034 struct bnx2x *bp = netdev_priv(dev);
4035 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4038 case NETDEV_FCOE_WWNN:
4039 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4040 cp->fcoe_wwn_node_name_lo);
4042 case NETDEV_FCOE_WWPN:
4043 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4044 cp->fcoe_wwn_port_name_lo);
4047 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
/* called with rtnl_lock */
/* ndo_change_mtu: validate the requested MTU against the jumbo maximum
 * and ethernet minimum, refuse during parity recovery, then reload the
 * NIC (if running) so RX buffer sizing picks up the new MTU.
 * NOTE(review): the dev->mtu assignment and error returns are elided in
 * this listing. */
4056 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4058 struct bnx2x *bp = netdev_priv(dev);
4060 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4061 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4065 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4066 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4067 BNX2X_ERR("Can't support requested MTU size\n");
4071 /* This does not race with packet allocation
4072 * because the actual alloc size is
4073 * only updated as part of load
4077 return bnx2x_reload_if_running(dev);
/* ndo_fix_features: LRO/GRO (TPA) depend on RX checksum offload, so
 * strip them when RXCSUM is off or TPA is administratively disabled.
 * NOTE(review): the final `return features;` is elided in this listing. */
4080 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4081 netdev_features_t features)
4083 struct bnx2x *bp = netdev_priv(dev);
4085 /* TPA requires Rx CSUM offloading */
4086 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4087 features &= ~NETIF_F_LRO;
4088 features &= ~NETIF_F_GRO;
/* ndo_set_features: translate LRO/GRO/LOOPBACK feature bits into driver
 * flags and loopback mode, and reload the NIC when anything actually
 * changed (deferred if parity recovery is in progress). */
4094 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4096 struct bnx2x *bp = netdev_priv(dev);
4097 u32 flags = bp->flags;
4098 bool bnx2x_reload = false;
4100 if (features & NETIF_F_LRO)
4101 flags |= TPA_ENABLE_FLAG;
4103 flags &= ~TPA_ENABLE_FLAG;
4105 if (features & NETIF_F_GRO)
4106 flags |= GRO_ENABLE_FLAG;
4108 flags &= ~GRO_ENABLE_FLAG;
/* Toggling NETIF_F_LOOPBACK switches BMAC loopback on/off; a reload is
 * needed for the new loopback mode to take effect. */
4110 if (features & NETIF_F_LOOPBACK) {
4111 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4112 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4113 bnx2x_reload = true;
4116 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4117 bp->link_params.loopback_mode = LOOPBACK_NONE;
4118 bnx2x_reload = true;
/* XOR detects any flag difference from the current bp->flags. */
4122 if (flags ^ bp->flags) {
4124 bnx2x_reload = true;
4128 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4129 return bnx2x_reload_if_running(dev);
4130 /* else: bnx2x_nic_load() will be called at end of recovery */
/* ndo_tx_timeout: defer the actual reset to the slowpath-rtnl worker by
 * setting BNX2X_SP_RTNL_TX_TIMEOUT; the memory barriers order the bit
 * set against the worker's test of sp_rtnl_state. */
4136 void bnx2x_tx_timeout(struct net_device *dev)
4138 struct bnx2x *bp = netdev_priv(dev);
4140 #ifdef BNX2X_STOP_ON_ERROR
4145 smp_mb__before_clear_bit();
4146 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4147 smp_mb__after_clear_bit();
4149 /* This allows the netif to be shutdown gracefully before resetting */
4150 schedule_delayed_work(&bp->sp_rtnl_task, 0);
/* PCI suspend handler: save PCI state, detach and unload the NIC if it
 * was running, then drop to the PM state chosen for @state.
 * NOTE(review): rtnl locking and early-return lines are elided in this
 * listing. */
4153 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4155 struct net_device *dev = pci_get_drvdata(pdev);
4159 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4162 bp = netdev_priv(dev);
4166 pci_save_state(pdev);
4168 if (!netif_running(dev)) {
4173 netif_device_detach(dev);
/* UNLOAD_CLOSE keeps state needed to resume; no chip reset requested. */
4175 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4177 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
/* PCI resume handler: refuse while parity recovery is pending, restore
 * PCI state, bring the device back to D0, re-attach the netdev and
 * reload the NIC if it was running at suspend time.
 * NOTE(review): rtnl locking and early-return lines are elided in this
 * listing. */
4184 int bnx2x_resume(struct pci_dev *pdev)
4186 struct net_device *dev = pci_get_drvdata(pdev);
4191 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4194 bp = netdev_priv(dev);
4196 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4197 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4203 pci_restore_state(pdev);
4205 if (!netif_running(dev)) {
4210 bnx2x_set_power_state(bp, PCI_D0);
4211 netif_device_attach(dev);
4213 rc = bnx2x_nic_load(bp, LOAD_OPEN);
/* Stamp CDU validation values into an ETH connection context so the
 * chip's Context Data Unit accepts it: one value for the ustorm AG
 * context region, one for the xstorm AG context region.
 * NOTE(review): the `cid` parameter line is elided in this listing. */
4221 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4224 /* ustorm cxt validation */
4225 cxt->ustorm_ag_context.cdu_usage =
4226 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4227 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4228 /* xcontext validation */
4229 cxt->xstorm_ag_context.cdu_reserved =
4230 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4231 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
/* Write a host-coalescing timeout (in ticks) for one status-block index
 * into CSTORM internal memory via an 8-bit register write. */
4234 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4235 u8 fw_sb_id, u8 sb_index,
4239 u32 addr = BAR_CSTRORM_INTMEM +
4240 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4241 REG_WR8(bp, addr, ticks);
4243 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4244 port, fw_sb_id, sb_index, ticks);
/* Set or clear the HC_ENABLED flag for one status-block index in CSTORM
 * internal memory: read-modify-write of the 16-bit flags word. */
4247 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4248 u16 fw_sb_id, u8 sb_index,
/* disable != 0 -> clear the enable bit; otherwise set it. */
4251 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4252 u32 addr = BAR_CSTRORM_INTMEM +
4253 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4254 u16 flags = REG_RD16(bp, addr);
4256 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4257 flags |= enable_flag;
4258 REG_WR16(bp, addr, flags);
4260 "port %x fw_sb_id %d sb_index %d disable %d\n",
4261 port, fw_sb_id, sb_index, disable);
4264 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4265 u8 sb_index, u8 disable, u16 usec)
4267 int port = BP_PORT(bp);
4268 u8 ticks = usec / BNX2X_BTR;
4270 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4272 disable = disable ? 1 : (usec ? 0 : 1);
4273 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);