1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <linux/prefetch.h>
28 #include "bnx2x_cmn.h"
29 #include "bnx2x_init.h"
33 * bnx2x_move_fp - move content of the fastpath structure.
36 * @from: source FP index
37 * @to: destination FP index
39 * Makes sure the contents of the bp->fp[to].napi is kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem copying the entire
42 * source onto the target. Update txdata pointers and related
45 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
96 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
120 * @delta: number of eth queues which were not allocated
122 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overriden
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
141 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
143 /* free skb in the packet ring at pos idx
144 * return idx of last bd freed
146 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
157 /* prefetch skb end pointer to speedup dev_kfree_skb() */
160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
161 txdata->txq_index, idx, tx_buf, skb);
164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170 #ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
176 new_cons = nbd + tx_buf->first_bd;
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
181 /* Skip a parse bd... */
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
205 (*bytes_compl) += skb->len;
208 dev_kfree_skb_any(skb);
209 tx_buf->first_bd = 0;
215 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
217 struct netdev_queue *txq;
218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
219 unsigned int pkts_compl = 0, bytes_compl = 0;
221 #ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
230 while (sw_cons != hw_cons) {
233 pkt_cons = TX_BD(sw_cons);
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
240 &pkts_compl, &bytes_compl);
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue
263 * while it's empty. This could have happen if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
272 __netif_tx_lock(txq, smp_processor_id());
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
277 netif_tx_wake_queue(txq);
279 __netif_tx_unlock(txq);
284 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
287 u16 last_max = fp->last_max_sge;
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
293 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
295 struct eth_end_agg_rx_cqe *cqe)
297 struct bnx2x *bp = fp->bp;
298 u16 last_max, last_elem, first_elem;
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
315 bnx2x_update_last_max_sge(fp,
316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
318 last_max = RX_SGE(fp->last_max_sge);
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
346 /* Get Toeplitz hash value in the skb using the value from the
347 * CQE (calculated by HW).
349 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
350 const struct eth_fast_path_rx_cqe *cqe,
353 /* Get Toeplitz hash from CQE */
354 if ((bp->dev->features & NETIF_F_RXHASH) &&
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
361 return le32_to_cpu(cqe->rss_hash_result);
367 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 struct eth_fast_path_rx_cqe *cqe)
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
383 /* Try to map an empty data buffer from the aggregation info */
384 mapping = dma_map_single(&bp->pdev->dev,
385 first_buf->data + NET_SKB_PAD,
386 fp->rx_buf_size, DMA_FROM_DEVICE);
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
395 bnx2x_reuse_rx_data(fp, cons, prod);
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
403 /* point prod_bd to new data */
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
421 tpa_info->gro_size = gro_size;
424 #ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426 #ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
435 /* Timestamp option length allowed for TPA aggregation:
437 * nop nop kind length echo val
439 #define TPA_TSTAMP_OPT_LEN 12
441 * bnx2x_set_gro_params - compute GRO values
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
447 * @pkt_len: length of all segments
449 * Approximate value of the MSS for this aggregation calculated using
450 * the first packet of it.
451 * Compute number of aggregated segments, and gso_type.
453 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
457 /* TPA aggregation won't have either IP options or TCP options
458 * other than timestamp or IPv6 extension headers.
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
463 PRS_FLAG_OVERETH_IPV6) {
464 hdrs_len += sizeof(struct ipv6hdr);
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
467 hdrs_len += sizeof(struct iphdr);
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
471 /* Check if there was a TCP timestamp, if there is it's will
472 * always be 12 bytes length: nop nop kind length echo val.
474 * Otherwise FW would close the aggregation.
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
487 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
501 SGE_PAGES, DMA_FROM_DEVICE);
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
517 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
518 struct bnx2x_agg_info *tpa_info,
521 struct eth_end_agg_rx_cqe *cqe,
524 struct sw_rx_page *rx_pg, old_rx_pg;
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
527 u16 len_on_bd = tpa_info->len_on_bd;
528 u16 full_page = 0, gro_size = 0;
530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
537 /* This is needed in order to enable forwarding support */
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
543 #ifdef BNX2X_STOP_ON_ERROR
544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
564 rx_pg = &fp->rx_page_ring[sge_idx];
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
575 /* Unmap the page as we r going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
578 SGE_PAGES, DMA_FROM_DEVICE);
579 /* Add one frag and update the appropriate fields in the skb */
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
590 get_page(old_rx_pg.page);
595 skb->data_len += frag_len;
596 skb->truesize += SGE_PAGES;
597 skb->len += frag_len;
599 frag_size -= frag_len;
605 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
613 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
622 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
624 const struct iphdr *iph = ip_hdr(skb);
627 skb_set_transport_header(skb, sizeof(struct iphdr));
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
634 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
636 struct ipv6hdr *iph = ipv6_hdr(skb);
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
647 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
651 if (skb_shinfo(skb)->gso_size) {
652 skb_set_network_header(skb, 0);
653 switch (be16_to_cpu(skb->protocol)) {
655 bnx2x_gro_ip_csum(bp, skb);
658 bnx2x_gro_ipv6_csum(bp, skb);
661 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
662 be16_to_cpu(skb->protocol));
664 tcp_gro_complete(skb);
667 napi_gro_receive(&fp->napi, skb);
670 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
671 struct bnx2x_agg_info *tpa_info,
673 struct eth_end_agg_rx_cqe *cqe,
676 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
677 u8 pad = tpa_info->placement_offset;
678 u16 len = tpa_info->len_on_bd;
679 struct sk_buff *skb = NULL;
680 u8 *new_data, *data = rx_buf->data;
681 u8 old_tpa_state = tpa_info->tpa_state;
683 tpa_info->tpa_state = BNX2X_TPA_STOP;
685 /* If we there was an error during the handling of the TPA_START -
686 * drop this aggregation.
688 if (old_tpa_state == BNX2X_TPA_ERROR)
691 /* Try to allocate the new data */
692 new_data = bnx2x_frag_alloc(fp);
693 /* Unmap skb in the pool anyway, as we are going to change
694 pool entry status to BNX2X_TPA_STOP even if new skb allocation
696 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
697 fp->rx_buf_size, DMA_FROM_DEVICE);
698 if (likely(new_data))
699 skb = build_skb(data, fp->rx_frag_size);
702 #ifdef BNX2X_STOP_ON_ERROR
703 if (pad + len > fp->rx_buf_size) {
704 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
705 pad, len, fp->rx_buf_size);
711 skb_reserve(skb, pad + NET_SKB_PAD);
713 skb->rxhash = tpa_info->rxhash;
714 skb->l4_rxhash = tpa_info->l4_rxhash;
716 skb->protocol = eth_type_trans(skb, bp->dev);
717 skb->ip_summed = CHECKSUM_UNNECESSARY;
719 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
720 skb, cqe, cqe_idx)) {
721 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
722 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
723 bnx2x_gro_receive(bp, fp, skb);
725 DP(NETIF_MSG_RX_STATUS,
726 "Failed to allocate new pages - dropping packet!\n");
727 dev_kfree_skb_any(skb);
731 /* put new data in bin */
732 rx_buf->data = new_data;
736 bnx2x_frag_free(fp, new_data);
738 /* drop the packet and keep the buffer in the bin */
739 DP(NETIF_MSG_RX_STATUS,
740 "Failed to allocate or map a new skb - dropping packet!\n");
741 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
744 static int bnx2x_alloc_rx_data(struct bnx2x *bp,
745 struct bnx2x_fastpath *fp, u16 index)
748 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
749 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
752 data = bnx2x_frag_alloc(fp);
753 if (unlikely(data == NULL))
756 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
759 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
760 bnx2x_frag_free(fp, data);
761 BNX2X_ERR("Can't map rx data\n");
766 dma_unmap_addr_set(rx_buf, mapping, mapping);
768 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
769 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
775 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
776 struct bnx2x_fastpath *fp,
777 struct bnx2x_eth_q_stats *qstats)
779 /* Do nothing if no L4 csum validation was done.
780 * We do not check whether IP csum was validated. For IPv4 we assume
781 * that if the card got as far as validating the L4 csum, it also
782 * validated the IP csum. IPv6 has no IP csum.
784 if (cqe->fast_path_cqe.status_flags &
785 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
788 /* If L4 validation was done, check if an error was found. */
790 if (cqe->fast_path_cqe.type_error_flags &
791 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
792 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
793 qstats->hw_csum_err++;
795 skb->ip_summed = CHECKSUM_UNNECESSARY;
798 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
800 struct bnx2x *bp = fp->bp;
801 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
802 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
805 #ifdef BNX2X_STOP_ON_ERROR
806 if (unlikely(bp->panic))
810 /* CQ "next element" is of the size of the regular element,
811 that's why it's ok here */
812 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
813 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
816 bd_cons = fp->rx_bd_cons;
817 bd_prod = fp->rx_bd_prod;
818 bd_prod_fw = bd_prod;
819 sw_comp_cons = fp->rx_comp_cons;
820 sw_comp_prod = fp->rx_comp_prod;
822 /* Memory barrier necessary as speculative reads of the rx
823 * buffer can be ahead of the index in the status block
827 DP(NETIF_MSG_RX_STATUS,
828 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
829 fp->index, hw_comp_cons, sw_comp_cons);
831 while (sw_comp_cons != hw_comp_cons) {
832 struct sw_rx_bd *rx_buf = NULL;
834 union eth_rx_cqe *cqe;
835 struct eth_fast_path_rx_cqe *cqe_fp;
837 enum eth_rx_cqe_type cqe_fp_type;
842 #ifdef BNX2X_STOP_ON_ERROR
843 if (unlikely(bp->panic))
847 comp_ring_cons = RCQ_BD(sw_comp_cons);
848 bd_prod = RX_BD(bd_prod);
849 bd_cons = RX_BD(bd_cons);
851 cqe = &fp->rx_comp_ring[comp_ring_cons];
852 cqe_fp = &cqe->fast_path_cqe;
853 cqe_fp_flags = cqe_fp->type_error_flags;
854 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
856 DP(NETIF_MSG_RX_STATUS,
857 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
858 CQE_TYPE(cqe_fp_flags),
859 cqe_fp_flags, cqe_fp->status_flags,
860 le32_to_cpu(cqe_fp->rss_hash_result),
861 le16_to_cpu(cqe_fp->vlan_tag),
862 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
864 /* is this a slowpath msg? */
865 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
866 bnx2x_sp_event(fp, cqe);
870 rx_buf = &fp->rx_buf_ring[bd_cons];
873 if (!CQE_TYPE_FAST(cqe_fp_type)) {
874 struct bnx2x_agg_info *tpa_info;
875 u16 frag_size, pages;
876 #ifdef BNX2X_STOP_ON_ERROR
878 if (fp->disable_tpa &&
879 (CQE_TYPE_START(cqe_fp_type) ||
880 CQE_TYPE_STOP(cqe_fp_type)))
881 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
882 CQE_TYPE(cqe_fp_type));
885 if (CQE_TYPE_START(cqe_fp_type)) {
886 u16 queue = cqe_fp->queue_index;
887 DP(NETIF_MSG_RX_STATUS,
888 "calling tpa_start on queue %d\n",
891 bnx2x_tpa_start(fp, queue,
898 queue = cqe->end_agg_cqe.queue_index;
899 tpa_info = &fp->tpa_info[queue];
900 DP(NETIF_MSG_RX_STATUS,
901 "calling tpa_stop on queue %d\n",
904 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
907 if (fp->mode == TPA_MODE_GRO)
908 pages = (frag_size + tpa_info->full_page - 1) /
911 pages = SGE_PAGE_ALIGN(frag_size) >>
914 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
915 &cqe->end_agg_cqe, comp_ring_cons);
916 #ifdef BNX2X_STOP_ON_ERROR
921 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
925 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
926 pad = cqe_fp->placement_offset;
927 dma_sync_single_for_cpu(&bp->pdev->dev,
928 dma_unmap_addr(rx_buf, mapping),
929 pad + RX_COPY_THRESH,
932 prefetch(data + pad); /* speedup eth_type_trans() */
933 /* is this an error packet? */
934 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
935 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
936 "ERROR flags %x rx packet %u\n",
937 cqe_fp_flags, sw_comp_cons);
938 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
942 /* Since we don't have a jumbo ring
943 * copy small packets if mtu > 1500
945 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
946 (len <= RX_COPY_THRESH)) {
947 skb = netdev_alloc_skb_ip_align(bp->dev, len);
949 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
950 "ERROR packet dropped because of alloc failure\n");
951 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
954 memcpy(skb->data, data + pad, len);
955 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
957 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
958 dma_unmap_single(&bp->pdev->dev,
959 dma_unmap_addr(rx_buf, mapping),
962 skb = build_skb(data, fp->rx_frag_size);
963 if (unlikely(!skb)) {
964 bnx2x_frag_free(fp, data);
965 bnx2x_fp_qstats(bp, fp)->
966 rx_skb_alloc_failed++;
969 skb_reserve(skb, pad);
971 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
972 "ERROR packet dropped because of alloc failure\n");
973 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
975 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
981 skb->protocol = eth_type_trans(skb, bp->dev);
983 /* Set Toeplitz hash for a none-LRO skb */
984 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
985 skb->l4_rxhash = l4_rxhash;
987 skb_checksum_none_assert(skb);
989 if (bp->dev->features & NETIF_F_RXCSUM)
990 bnx2x_csum_validate(skb, cqe, fp,
991 bnx2x_fp_qstats(bp, fp));
993 skb_record_rx_queue(skb, fp->rx_queue);
995 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
997 __vlan_hwaccel_put_tag(skb,
998 le16_to_cpu(cqe_fp->vlan_tag));
999 napi_gro_receive(&fp->napi, skb);
1003 rx_buf->data = NULL;
1005 bd_cons = NEXT_RX_IDX(bd_cons);
1006 bd_prod = NEXT_RX_IDX(bd_prod);
1007 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1010 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1011 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1013 if (rx_pkt == budget)
1017 fp->rx_bd_cons = bd_cons;
1018 fp->rx_bd_prod = bd_prod_fw;
1019 fp->rx_comp_cons = sw_comp_cons;
1020 fp->rx_comp_prod = sw_comp_prod;
1022 /* Update producers */
1023 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1026 fp->rx_pkt += rx_pkt;
1032 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1034 struct bnx2x_fastpath *fp = fp_cookie;
1035 struct bnx2x *bp = fp->bp;
1039 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1040 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1041 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1043 #ifdef BNX2X_STOP_ON_ERROR
1044 if (unlikely(bp->panic))
1048 /* Handle Rx and Tx according to MSI-X vector */
1049 prefetch(fp->rx_cons_sb);
1051 for_each_cos_in_tx_queue(fp, cos)
1052 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1054 prefetch(&fp->sb_running_index[SM_RX_ID]);
1055 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1060 /* HW Lock for shared dual port PHYs */
1061 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1063 mutex_lock(&bp->port.phy_mutex);
1065 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1068 void bnx2x_release_phy_lock(struct bnx2x *bp)
1070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1072 mutex_unlock(&bp->port.phy_mutex);
1075 /* calculates MF speed according to current linespeed and MF configuration */
1076 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1078 u16 line_speed = bp->link_vars.line_speed;
1080 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1081 bp->mf_config[BP_VN(bp)]);
1083 /* Calculate the current MAX line speed limit for the MF
1087 line_speed = (line_speed * maxCfg) / 100;
1088 else { /* SD mode */
1089 u16 vn_max_rate = maxCfg * 100;
1091 if (vn_max_rate < line_speed)
1092 line_speed = vn_max_rate;
1100 * bnx2x_fill_report_data - fill link report data to report
1102 * @bp: driver handle
1103 * @data: link state to update
1105 * It uses a none-atomic bit operations because is called under the mutex.
1107 static void bnx2x_fill_report_data(struct bnx2x *bp,
1108 struct bnx2x_link_report_data *data)
1110 u16 line_speed = bnx2x_get_mf_speed(bp);
1112 memset(data, 0, sizeof(*data));
1114 /* Fill the report data: efective line speed */
1115 data->line_speed = line_speed;
1118 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1119 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1120 &data->link_report_flags);
1123 if (bp->link_vars.duplex == DUPLEX_FULL)
1124 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1126 /* Rx Flow Control is ON */
1127 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1128 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1130 /* Tx Flow Control is ON */
1131 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1132 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1136 * bnx2x_link_report - report link status to OS.
1138 * @bp: driver handle
1140 * Calls the __bnx2x_link_report() under the same locking scheme
1141 * as a link/PHY state managing code to ensure a consistent link
1145 void bnx2x_link_report(struct bnx2x *bp)
1147 bnx2x_acquire_phy_lock(bp);
1148 __bnx2x_link_report(bp);
1149 bnx2x_release_phy_lock(bp);
1153 * __bnx2x_link_report - report link status to OS.
1155 * @bp: driver handle
1157 * None atomic inmlementation.
1158 * Should be called under the phy_lock.
1160 void __bnx2x_link_report(struct bnx2x *bp)
1162 struct bnx2x_link_report_data cur_data;
1165 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1166 bnx2x_read_mf_cfg(bp);
1168 /* Read the current link report info */
1169 bnx2x_fill_report_data(bp, &cur_data);
1171 /* Don't report link down or exactly the same link status twice */
1172 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1173 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1174 &bp->last_reported_link.link_report_flags) &&
1175 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1176 &cur_data.link_report_flags)))
1181 /* We are going to report a new link parameters now -
1182 * remember the current data for the next time.
1184 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1186 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1187 &cur_data.link_report_flags)) {
1188 netif_carrier_off(bp->dev);
1189 netdev_err(bp->dev, "NIC Link is Down\n");
1195 netif_carrier_on(bp->dev);
1197 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1198 &cur_data.link_report_flags))
1203 /* Handle the FC at the end so that only these flags would be
1204 * possibly set. This way we may easily check if there is no FC
1207 if (cur_data.link_report_flags) {
1208 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1209 &cur_data.link_report_flags)) {
1210 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1211 &cur_data.link_report_flags))
1212 flow = "ON - receive & transmit";
1214 flow = "ON - receive";
1216 flow = "ON - transmit";
1221 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1222 cur_data.line_speed, duplex, flow);
1226 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1230 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1231 struct eth_rx_sge *sge;
1233 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1235 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1236 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1239 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1240 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1244 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1245 struct bnx2x_fastpath *fp, int last)
1249 for (i = 0; i < last; i++) {
1250 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1251 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1252 u8 *data = first_buf->data;
1255 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1258 if (tpa_info->tpa_state == BNX2X_TPA_START)
1259 dma_unmap_single(&bp->pdev->dev,
1260 dma_unmap_addr(first_buf, mapping),
1261 fp->rx_buf_size, DMA_FROM_DEVICE);
1262 bnx2x_frag_free(fp, data);
1263 first_buf->data = NULL;
1267 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1271 for_each_rx_queue_cnic(bp, j) {
1272 struct bnx2x_fastpath *fp = &bp->fp[j];
1276 /* Activate BD ring */
1278 * this will generate an interrupt (to the TSTORM)
1279 * must only be done after chip is initialized
1281 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1286 void bnx2x_init_rx_rings(struct bnx2x *bp)
1288 int func = BP_FUNC(bp);
1292 /* Allocate TPA resources */
1293 for_each_eth_queue(bp, j) {
1294 struct bnx2x_fastpath *fp = &bp->fp[j];
1297 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1299 if (!fp->disable_tpa) {
1300 /* Fill the per-aggregtion pool */
1301 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1302 struct bnx2x_agg_info *tpa_info =
1304 struct sw_rx_bd *first_buf =
1305 &tpa_info->first_buf;
1307 first_buf->data = bnx2x_frag_alloc(fp);
1308 if (!first_buf->data) {
1309 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1311 bnx2x_free_tpa_pool(bp, fp, i);
1312 fp->disable_tpa = 1;
1315 dma_unmap_addr_set(first_buf, mapping, 0);
1316 tpa_info->tpa_state = BNX2X_TPA_STOP;
1319 /* "next page" elements initialization */
1320 bnx2x_set_next_page_sgl(fp);
1322 /* set SGEs bit mask */
1323 bnx2x_init_sge_ring_bit_mask(fp);
1325 /* Allocate SGEs and initialize the ring elements */
1326 for (i = 0, ring_prod = 0;
1327 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1329 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1330 BNX2X_ERR("was only able to allocate %d rx sges\n",
1332 BNX2X_ERR("disabling TPA for queue[%d]\n",
1334 /* Cleanup already allocated elements */
1335 bnx2x_free_rx_sge_range(bp, fp,
1337 bnx2x_free_tpa_pool(bp, fp,
1339 fp->disable_tpa = 1;
1343 ring_prod = NEXT_SGE_IDX(ring_prod);
1346 fp->rx_sge_prod = ring_prod;
1350 for_each_eth_queue(bp, j) {
1351 struct bnx2x_fastpath *fp = &bp->fp[j];
1355 /* Activate BD ring */
1357 * this will generate an interrupt (to the TSTORM)
1358 * must only be done after chip is initialized
1360 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1366 if (CHIP_IS_E1(bp)) {
1367 REG_WR(bp, BAR_USTRORM_INTMEM +
1368 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1369 U64_LO(fp->rx_comp_mapping));
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1372 U64_HI(fp->rx_comp_mapping));
1377 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1380 struct bnx2x *bp = fp->bp;
1382 for_each_cos_in_tx_queue(fp, cos) {
1383 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1384 unsigned pkts_compl = 0, bytes_compl = 0;
1386 u16 sw_prod = txdata->tx_pkt_prod;
1387 u16 sw_cons = txdata->tx_pkt_cons;
1389 while (sw_cons != sw_prod) {
1390 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1391 &pkts_compl, &bytes_compl);
1395 netdev_tx_reset_queue(
1396 netdev_get_tx_queue(bp->dev,
1397 txdata->txq_index));
1401 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1405 for_each_tx_queue_cnic(bp, i) {
1406 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1410 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1414 for_each_eth_queue(bp, i) {
1415 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1419 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1421 struct bnx2x *bp = fp->bp;
1424 /* ring wasn't allocated */
1425 if (fp->rx_buf_ring == NULL)
1428 for (i = 0; i < NUM_RX_BD; i++) {
1429 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1430 u8 *data = rx_buf->data;
1434 dma_unmap_single(&bp->pdev->dev,
1435 dma_unmap_addr(rx_buf, mapping),
1436 fp->rx_buf_size, DMA_FROM_DEVICE);
1438 rx_buf->data = NULL;
1439 bnx2x_frag_free(fp, data);
1443 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1447 for_each_rx_queue_cnic(bp, j) {
1448 bnx2x_free_rx_bds(&bp->fp[j]);
1452 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1456 for_each_eth_queue(bp, j) {
1457 struct bnx2x_fastpath *fp = &bp->fp[j];
1459 bnx2x_free_rx_bds(fp);
1461 if (!fp->disable_tpa)
1462 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1466 void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1468 bnx2x_free_tx_skbs_cnic(bp);
1469 bnx2x_free_rx_skbs_cnic(bp);
1472 void bnx2x_free_skbs(struct bnx2x *bp)
1474 bnx2x_free_tx_skbs(bp);
1475 bnx2x_free_rx_skbs(bp);
1478 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1480 /* load old values */
1481 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1483 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1484 /* leave all but MAX value */
1485 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1487 /* set new MAX value */
1488 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1489 & FUNC_MF_CFG_MAX_BW_MASK;
1491 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1496 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1498 * @bp: driver handle
1499 * @nvecs: number of vectors to be released
1501 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1505 if (nvecs == offset)
1508 /* VFs don't have a default SB */
1510 free_irq(bp->msix_table[offset].vector, bp->dev);
1511 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1512 bp->msix_table[offset].vector);
1516 if (CNIC_SUPPORT(bp)) {
1517 if (nvecs == offset)
1522 for_each_eth_queue(bp, i) {
1523 if (nvecs == offset)
1525 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1526 i, bp->msix_table[offset].vector);
1528 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1532 void bnx2x_free_irq(struct bnx2x *bp)
1534 if (bp->flags & USING_MSIX_FLAG &&
1535 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1536 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1538 /* vfs don't have a default status block */
1542 bnx2x_free_msix_irqs(bp, nvecs);
1544 free_irq(bp->dev->irq, bp->dev);
1548 int bnx2x_enable_msix(struct bnx2x *bp)
1550 int msix_vec = 0, i, rc;
1552 /* VFs don't have a default status block */
1554 bp->msix_table[msix_vec].entry = msix_vec;
1555 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1556 bp->msix_table[0].entry);
1560 /* Cnic requires an msix vector for itself */
1561 if (CNIC_SUPPORT(bp)) {
1562 bp->msix_table[msix_vec].entry = msix_vec;
1563 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1564 msix_vec, bp->msix_table[msix_vec].entry);
1568 /* We need separate vectors for ETH queues only (not FCoE) */
1569 for_each_eth_queue(bp, i) {
1570 bp->msix_table[msix_vec].entry = msix_vec;
1571 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1572 msix_vec, msix_vec, i);
1576 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1579 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
1582 * reconfigure number of tx/rx queues according to available
1585 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1586 /* how less vectors we will have? */
1587 int diff = msix_vec - rc;
1589 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1591 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1594 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1598 * decrease number of queues by number of unallocated entries
1600 bp->num_ethernet_queues -= diff;
1601 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1603 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1605 } else if (rc > 0) {
1606 /* Get by with single vector */
1607 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1609 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1614 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1615 bp->flags |= USING_SINGLE_MSIX_FLAG;
1617 BNX2X_DEV_INFO("set number of queues to 1\n");
1618 bp->num_ethernet_queues = 1;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1620 } else if (rc < 0) {
1621 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1625 bp->flags |= USING_MSIX_FLAG;
1630 /* fall to INTx if not enough memory */
1632 bp->flags |= DISABLE_MSI_FLAG;
1637 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1639 int i, rc, offset = 0;
1641 /* no default status block for vf */
1643 rc = request_irq(bp->msix_table[offset++].vector,
1644 bnx2x_msix_sp_int, 0,
1645 bp->dev->name, bp->dev);
1647 BNX2X_ERR("request sp irq failed\n");
1652 if (CNIC_SUPPORT(bp))
1655 for_each_eth_queue(bp, i) {
1656 struct bnx2x_fastpath *fp = &bp->fp[i];
1657 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1660 rc = request_irq(bp->msix_table[offset].vector,
1661 bnx2x_msix_fp_int, 0, fp->name, fp);
1663 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1664 bp->msix_table[offset].vector, rc);
1665 bnx2x_free_msix_irqs(bp, offset);
1672 i = BNX2X_NUM_ETH_QUEUES(bp);
1674 offset = 1 + CNIC_SUPPORT(bp);
1675 netdev_info(bp->dev,
1676 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1677 bp->msix_table[0].vector,
1678 0, bp->msix_table[offset].vector,
1679 i - 1, bp->msix_table[offset + i - 1].vector);
1681 offset = CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1684 0, bp->msix_table[offset].vector,
1685 i - 1, bp->msix_table[offset + i - 1].vector);
1690 int bnx2x_enable_msi(struct bnx2x *bp)
1694 rc = pci_enable_msi(bp->pdev);
1696 BNX2X_DEV_INFO("MSI is not attainable\n");
1699 bp->flags |= USING_MSI_FLAG;
1704 static int bnx2x_req_irq(struct bnx2x *bp)
1706 unsigned long flags;
1709 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1712 flags = IRQF_SHARED;
1714 if (bp->flags & USING_MSIX_FLAG)
1715 irq = bp->msix_table[0].vector;
1717 irq = bp->pdev->irq;
1719 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1722 static int bnx2x_setup_irqs(struct bnx2x *bp)
1725 if (bp->flags & USING_MSIX_FLAG &&
1726 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1727 rc = bnx2x_req_msix_irqs(bp);
1731 rc = bnx2x_req_irq(bp);
1733 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1736 if (bp->flags & USING_MSI_FLAG) {
1737 bp->dev->irq = bp->pdev->irq;
1738 netdev_info(bp->dev, "using MSI IRQ %d\n",
1741 if (bp->flags & USING_MSIX_FLAG) {
1742 bp->dev->irq = bp->msix_table[0].vector;
1743 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1751 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1755 for_each_rx_queue_cnic(bp, i)
1756 napi_enable(&bnx2x_fp(bp, i, napi));
1759 static void bnx2x_napi_enable(struct bnx2x *bp)
1763 for_each_eth_queue(bp, i)
1764 napi_enable(&bnx2x_fp(bp, i, napi));
1767 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1771 for_each_rx_queue_cnic(bp, i)
1772 napi_disable(&bnx2x_fp(bp, i, napi));
1775 static void bnx2x_napi_disable(struct bnx2x *bp)
1779 for_each_eth_queue(bp, i)
1780 napi_disable(&bnx2x_fp(bp, i, napi));
1783 void bnx2x_netif_start(struct bnx2x *bp)
1785 if (netif_running(bp->dev)) {
1786 bnx2x_napi_enable(bp);
1787 if (CNIC_LOADED(bp))
1788 bnx2x_napi_enable_cnic(bp);
1789 bnx2x_int_enable(bp);
1790 if (bp->state == BNX2X_STATE_OPEN)
1791 netif_tx_wake_all_queues(bp->dev);
1795 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1797 bnx2x_int_disable_sync(bp, disable_hw);
1798 bnx2x_napi_disable(bp);
1799 if (CNIC_LOADED(bp))
1800 bnx2x_napi_disable_cnic(bp);
1803 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1805 struct bnx2x *bp = netdev_priv(dev);
1807 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1808 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1809 u16 ether_type = ntohs(hdr->h_proto);
1811 /* Skip VLAN tag if present */
1812 if (ether_type == ETH_P_8021Q) {
1813 struct vlan_ethhdr *vhdr =
1814 (struct vlan_ethhdr *)skb->data;
1816 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1819 /* If ethertype is FCoE or FIP - use FCoE ring */
1820 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1821 return bnx2x_fcoe_tx(bp, txq_index);
1824 /* select a non-FCoE queue */
1825 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1828 void bnx2x_set_num_queues(struct bnx2x *bp)
1831 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1833 /* override in STORAGE SD modes */
1834 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1835 bp->num_ethernet_queues = 1;
1837 /* Add special queues */
1838 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1839 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1841 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1845 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1847 * @bp: Driver handle
1849 * We currently support for at most 16 Tx queues for each CoS thus we will
1850 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1853 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1854 * index after all ETH L2 indices.
1856 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1857 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1858 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1860 * The proper configuration of skb->queue_mapping is handled by
1861 * bnx2x_select_queue() and __skb_tx_hash().
1863 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1864 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1866 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1870 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1871 rx = BNX2X_NUM_ETH_QUEUES(bp);
1873 /* account for fcoe queue */
1874 if (include_cnic && !NO_FCOE(bp)) {
1879 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1881 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1884 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1886 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1890 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1896 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1900 for_each_queue(bp, i) {
1901 struct bnx2x_fastpath *fp = &bp->fp[i];
1904 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1907 * Although there are no IP frames expected to arrive to
1908 * this ring we still want to add an
1909 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1912 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1915 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1916 IP_HEADER_ALIGNMENT_PADDING +
1919 BNX2X_FW_RX_ALIGN_END;
1920 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
1921 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1922 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1924 fp->rx_frag_size = 0;
1928 static int bnx2x_init_rss_pf(struct bnx2x *bp)
1931 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1933 /* Prepare the initial contents fo the indirection table if RSS is
1936 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1937 bp->rss_conf_obj.ind_table[i] =
1939 ethtool_rxfh_indir_default(i, num_eth_queues);
1942 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1943 * per-port, so if explicit configuration is needed , do it only
1946 * For 57712 and newer on the other hand it's a per-function
1949 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
1952 int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1955 struct bnx2x_config_rss_params params = {NULL};
1957 /* Although RSS is meaningless when there is a single HW queue we
1958 * still need it enabled in order to have HW Rx hash generated.
1960 * if (!is_eth_multi(bp))
1961 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1964 params.rss_obj = rss_obj;
1966 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1968 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1970 /* RSS configuration */
1971 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1972 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1975 if (rss_obj->udp_rss_v4)
1976 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
1977 if (rss_obj->udp_rss_v6)
1978 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
1981 params.rss_result_mask = MULTI_MASK;
1983 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
1987 prandom_bytes(params.rss_key, sizeof(params.rss_key));
1988 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1991 return bnx2x_config_rss(bp, ¶ms);
1994 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1996 struct bnx2x_func_state_params func_params = {NULL};
1998 /* Prepare parameters for function state transitions */
1999 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2001 func_params.f_obj = &bp->func_obj;
2002 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2004 func_params.params.hw_init.load_phase = load_code;
2006 return bnx2x_func_state_change(bp, &func_params);
2010 * Cleans the object that have internal lists without sending
2011 * ramrods. Should be run when interrutps are disabled.
2013 static void bnx2x_squeeze_objects(struct bnx2x *bp)
2016 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2017 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2018 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2020 /***************** Cleanup MACs' object first *************************/
2022 /* Wait for completion of requested */
2023 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2024 /* Perform a dry cleanup */
2025 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2027 /* Clean ETH primary MAC */
2028 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2029 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2032 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2034 /* Cleanup UC list */
2036 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2037 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2040 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2042 /***************** Now clean mcast object *****************************/
2043 rparam.mcast_obj = &bp->mcast_obj;
2044 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2046 /* Add a DEL command... */
2047 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2049 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2052 /* ...and wait until all pending commands are cleared */
2053 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2056 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2061 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2065 #ifndef BNX2X_STOP_ON_ERROR
2066 #define LOAD_ERROR_EXIT(bp, label) \
2068 (bp)->state = BNX2X_STATE_ERROR; \
2072 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2074 bp->cnic_loaded = false; \
2077 #else /*BNX2X_STOP_ON_ERROR*/
2078 #define LOAD_ERROR_EXIT(bp, label) \
2080 (bp)->state = BNX2X_STATE_ERROR; \
2084 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2086 bp->cnic_loaded = false; \
2090 #endif /*BNX2X_STOP_ON_ERROR*/
2092 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2094 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2095 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2099 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2101 int num_groups, vf_headroom = 0;
2102 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2104 /* number of queues for statistics is number of eth queues + FCoE */
2105 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2107 /* Total number of FW statistics requests =
2108 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2109 * and fcoe l2 queue) stats + num of queues (which includes another 1
2110 * for fcoe l2 queue if applicable)
2112 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2114 /* vf stats appear in the request list, but their data is allocated by
2115 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2116 * it is used to determine where to place the vf stats queries in the
2120 vf_headroom = bnx2x_vf_headroom(bp);
2122 /* Request is built from stats_query_header and an array of
2123 * stats_query_cmd_group each of which contains
2124 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
2125 * configured in the stats_query_header.
2128 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2129 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2132 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2133 bp->fw_stats_num, vf_headroom, num_groups);
2134 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2135 num_groups * sizeof(struct stats_query_cmd_group);
2137 /* Data for statistics requests + stats_counter
2138 * stats_counter holds per-STORM counters that are incremented
2139 * when STORM has finished with the current request.
2140 * memory for FCoE offloaded statistics are counted anyway,
2141 * even if they will not be sent.
2142 * VF stats are not accounted for here as the data of VF stats is stored
2143 * in memory allocated by the VF, not here.
2145 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2146 sizeof(struct per_pf_stats) +
2147 sizeof(struct fcoe_statistics_params) +
2148 sizeof(struct per_queue_stats) * num_queue_stats +
2149 sizeof(struct stats_counter);
2151 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2152 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2155 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2156 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2157 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2158 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2159 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2160 bp->fw_stats_req_sz;
2162 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2163 U64_HI(bp->fw_stats_req_mapping),
2164 U64_LO(bp->fw_stats_req_mapping));
2165 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2166 U64_HI(bp->fw_stats_data_mapping),
2167 U64_LO(bp->fw_stats_data_mapping));
2171 bnx2x_free_fw_stats_mem(bp);
2172 BNX2X_ERR("Can't allocate FW stats memory\n");
2176 /* send load request to mcp and analyze response */
2177 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2181 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2182 DRV_MSG_SEQ_NUMBER_MASK);
2183 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2185 /* Get current FW pulse sequence */
2186 bp->fw_drv_pulse_wr_seq =
2187 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2188 DRV_PULSE_SEQ_MASK);
2189 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2192 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2193 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2195 /* if mcp fails to respond we must abort */
2196 if (!(*load_code)) {
2197 BNX2X_ERR("MCP response failure, aborting\n");
2201 /* If mcp refused (e.g. other port is in diagnostic mode) we
2204 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2205 BNX2X_ERR("MCP refused load request, aborting\n");
2211 /* check whether another PF has already loaded FW to chip. In
2212 * virtualized environments a pf from another VM may have already
2213 * initialized the device including loading FW
2215 int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2217 /* is another pf loaded on this engine? */
2218 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2219 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2220 /* build my FW version dword */
2221 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2222 (BCM_5710_FW_MINOR_VERSION << 8) +
2223 (BCM_5710_FW_REVISION_VERSION << 16) +
2224 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2226 /* read loaded FW from chip */
2227 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2229 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2232 /* abort nic load if version mismatch */
2233 if (my_fw != loaded_fw) {
2234 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
2242 /* returns the "mcp load_code" according to global load_count array */
2243 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2245 int path = BP_PATH(bp);
2247 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2248 path, load_count[path][0], load_count[path][1],
2249 load_count[path][2]);
2250 load_count[path][0]++;
2251 load_count[path][1 + port]++;
2252 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2253 path, load_count[path][0], load_count[path][1],
2254 load_count[path][2]);
2255 if (load_count[path][0] == 1)
2256 return FW_MSG_CODE_DRV_LOAD_COMMON;
2257 else if (load_count[path][1 + port] == 1)
2258 return FW_MSG_CODE_DRV_LOAD_PORT;
2260 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2263 /* mark PMF if applicable */
2264 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2266 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2267 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2270 /* We need the barrier to ensure the ordering between the
2271 * writing to bp->port.pmf here and reading it from the
2272 * bnx2x_periodic_task().
2279 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2282 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2284 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2285 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2286 (bp->common.shmem2_base)) {
2287 if (SHMEM2_HAS(bp, dcc_support))
2288 SHMEM2_WR(bp, dcc_support,
2289 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2290 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2291 if (SHMEM2_HAS(bp, afex_driver_support))
2292 SHMEM2_WR(bp, afex_driver_support,
2293 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2296 /* Set AFEX default VLAN tag to an invalid value */
2297 bp->afex_def_vlan_tag = -1;
2301 * bnx2x_bz_fp - zero content of the fastpath structure.
2303 * @bp: driver handle
2304 * @index: fastpath index to be zeroed
2306 * Makes sure the contents of the bp->fp[index].napi is kept
2309 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2311 struct bnx2x_fastpath *fp = &bp->fp[index];
2314 struct napi_struct orig_napi = fp->napi;
2315 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2316 /* bzero bnx2x_fastpath contents */
2318 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2319 sizeof(struct bnx2x_agg_info));
2320 memset(fp, 0, sizeof(*fp));
2322 /* Restore the NAPI object as it has been already initialized */
2323 fp->napi = orig_napi;
2324 fp->tpa_info = orig_tpa_info;
2328 fp->max_cos = bp->max_cos;
2330 /* Special queues support only one CoS */
2333 /* Init txdata pointers */
2335 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2337 for_each_cos_in_tx_queue(fp, cos)
2338 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2339 BNX2X_NUM_ETH_QUEUES(bp) + index];
2342 * set the tpa flag for each queue. The tpa flag determines the queue
2343 * minimal size so it must be set prior to queue memory allocation
2345 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2346 (bp->flags & GRO_ENABLE_FLAG &&
2347 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2348 if (bp->flags & TPA_ENABLE_FLAG)
2349 fp->mode = TPA_MODE_LRO;
2350 else if (bp->flags & GRO_ENABLE_FLAG)
2351 fp->mode = TPA_MODE_GRO;
2353 /* We don't want TPA on an FCoE L2 ring */
2355 fp->disable_tpa = 1;
2358 int bnx2x_load_cnic(struct bnx2x *bp)
2360 int i, rc, port = BP_PORT(bp);
2362 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2364 mutex_init(&bp->cnic_mutex);
2367 rc = bnx2x_alloc_mem_cnic(bp);
2369 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2370 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2374 rc = bnx2x_alloc_fp_mem_cnic(bp);
2376 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2377 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2380 /* Update the number of queues with the cnic queues */
2381 rc = bnx2x_set_real_num_queues(bp, 1);
2383 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2384 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2387 /* Add all CNIC NAPI objects */
2388 bnx2x_add_all_napi_cnic(bp);
2389 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2390 bnx2x_napi_enable_cnic(bp);
2392 rc = bnx2x_init_hw_func_cnic(bp);
2394 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2396 bnx2x_nic_init_cnic(bp);
2399 /* Enable Timer scan */
2400 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2402 /* setup cnic queues */
2403 for_each_cnic_queue(bp, i) {
2404 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2406 BNX2X_ERR("Queue setup failed\n");
2407 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2412 /* Initialize Rx filter. */
2413 netif_addr_lock_bh(bp->dev);
2414 bnx2x_set_rx_mode(bp->dev);
2415 netif_addr_unlock_bh(bp->dev);
2417 /* re-read iscsi info */
2418 bnx2x_get_iscsi_info(bp);
2419 bnx2x_setup_cnic_irq_info(bp);
2420 bnx2x_setup_cnic_info(bp);
2421 bp->cnic_loaded = true;
2422 if (bp->state == BNX2X_STATE_OPEN)
2423 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2426 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2430 #ifndef BNX2X_STOP_ON_ERROR
2432 /* Disable Timer scan */
2433 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2436 bnx2x_napi_disable_cnic(bp);
2437 /* Update the number of queues without the cnic queues */
2438 rc = bnx2x_set_real_num_queues(bp, 0);
2440 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2442 BNX2X_ERR("CNIC-related load failed\n");
2443 bnx2x_free_fp_mem_cnic(bp);
2444 bnx2x_free_mem_cnic(bp);
2446 #endif /* ! BNX2X_STOP_ON_ERROR */
2449 /* must be called with rtnl_lock */
2450 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2452 int port = BP_PORT(bp);
2453 int i, rc = 0, load_code = 0;
2455 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2457 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2459 #ifdef BNX2X_STOP_ON_ERROR
2460 if (unlikely(bp->panic)) {
2461 BNX2X_ERR("Can't load NIC when there is panic\n");
2466 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2468 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2469 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2470 &bp->last_reported_link.link_report_flags);
2473 /* must be called before memory allocation and HW init */
2474 bnx2x_ilt_set_info(bp);
2477 * Zero fastpath structures preserving invariants like napi, which are
2478 * allocated only once, fp index, max_cos, bp pointer.
2479 * Also set fp->disable_tpa and txdata_ptr.
2481 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2482 for_each_queue(bp, i)
2484 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2485 bp->num_cnic_queues) *
2486 sizeof(struct bnx2x_fp_txdata));
2488 bp->fcoe_init = false;
2490 /* Set the receive queues buffer size */
2491 bnx2x_set_rx_buf_size(bp);
2494 rc = bnx2x_alloc_mem(bp);
2496 BNX2X_ERR("Unable to allocate bp memory\n");
2501 /* Allocated memory for FW statistics */
2502 if (bnx2x_alloc_fw_stats_mem(bp))
2503 LOAD_ERROR_EXIT(bp, load_error0);
2505 /* need to be done after alloc mem, since it's self adjusting to amount
2506 * of memory available for RSS queues
2508 rc = bnx2x_alloc_fp_mem(bp);
2510 BNX2X_ERR("Unable to allocate memory for fps\n");
2511 LOAD_ERROR_EXIT(bp, load_error0);
2514 /* request pf to initialize status blocks */
2516 rc = bnx2x_vfpf_init(bp);
2518 LOAD_ERROR_EXIT(bp, load_error0);
2521 /* As long as bnx2x_alloc_mem() may possibly update
2522 * bp->num_queues, bnx2x_set_real_num_queues() should always
2523 * come after it. At this stage cnic queues are not counted.
2525 rc = bnx2x_set_real_num_queues(bp, 0);
2527 BNX2X_ERR("Unable to set real_num_queues\n");
2528 LOAD_ERROR_EXIT(bp, load_error0);
2531 /* configure multi cos mappings in kernel.
2532 * this configuration may be overriden by a multi class queue discipline
2533 * or by a dcbx negotiation result.
2535 bnx2x_setup_tc(bp->dev, bp->max_cos);
2537 /* Add all NAPI objects */
2538 bnx2x_add_all_napi(bp);
2539 DP(NETIF_MSG_IFUP, "napi added\n");
2540 bnx2x_napi_enable(bp);
2543 /* set pf load just before approaching the MCP */
2544 bnx2x_set_pf_load(bp);
2546 /* if mcp exists send load request and analyze response */
2547 if (!BP_NOMCP(bp)) {
2548 /* attempt to load pf */
2549 rc = bnx2x_nic_load_request(bp, &load_code);
2551 LOAD_ERROR_EXIT(bp, load_error1);
2553 /* what did mcp say? */
2554 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2556 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2557 LOAD_ERROR_EXIT(bp, load_error2);
2560 load_code = bnx2x_nic_load_no_mcp(bp, port);
2563 /* mark pmf if applicable */
2564 bnx2x_nic_load_pmf(bp, load_code);
2566 /* Init Function state controlling object */
2567 bnx2x__init_func_obj(bp);
2570 rc = bnx2x_init_hw(bp, load_code);
2572 BNX2X_ERR("HW init failed, aborting\n");
2573 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2574 LOAD_ERROR_EXIT(bp, load_error2);
2578 /* Connect to IRQs */
2579 rc = bnx2x_setup_irqs(bp);
2581 BNX2X_ERR("setup irqs failed\n");
2583 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2584 LOAD_ERROR_EXIT(bp, load_error2);
2587 /* Setup NIC internals and enable interrupts */
2588 bnx2x_nic_init(bp, load_code);
2590 /* Init per-function objects */
2592 bnx2x_init_bp_objs(bp);
2593 bnx2x_iov_nic_init(bp);
2595 /* Set AFEX default VLAN tag to an invalid value */
2596 bp->afex_def_vlan_tag = -1;
2597 bnx2x_nic_load_afex_dcc(bp, load_code);
2598 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2599 rc = bnx2x_func_start(bp);
2601 BNX2X_ERR("Function start failed!\n");
2602 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2604 LOAD_ERROR_EXIT(bp, load_error3);
2607 /* Send LOAD_DONE command to MCP */
2608 if (!BP_NOMCP(bp)) {
2609 load_code = bnx2x_fw_command(bp,
2610 DRV_MSG_CODE_LOAD_DONE, 0);
2612 BNX2X_ERR("MCP response failure, aborting\n");
2614 LOAD_ERROR_EXIT(bp, load_error3);
2618 /* setup the leading queue */
2619 rc = bnx2x_setup_leading(bp);
2621 BNX2X_ERR("Setup leading failed!\n");
2622 LOAD_ERROR_EXIT(bp, load_error3);
2625 /* set up the rest of the queues */
2626 for_each_nondefault_eth_queue(bp, i) {
2627 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2629 BNX2X_ERR("Queue setup failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2635 rc = bnx2x_init_rss_pf(bp);
2637 BNX2X_ERR("PF RSS init failed\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2642 for_each_eth_queue(bp, i) {
2643 rc = bnx2x_vfpf_setup_q(bp, i);
2645 BNX2X_ERR("Queue setup failed\n");
2646 LOAD_ERROR_EXIT(bp, load_error3);
2651 /* Now when Clients are configured we are ready to work */
2652 bp->state = BNX2X_STATE_OPEN;
2654 /* Configure a ucast MAC */
2656 rc = bnx2x_set_eth_mac(bp, true);
2658 rc = bnx2x_vfpf_set_mac(bp);
2660 BNX2X_ERR("Setting Ethernet MAC failed\n");
2661 LOAD_ERROR_EXIT(bp, load_error3);
2664 if (IS_PF(bp) && bp->pending_max) {
2665 bnx2x_update_max_mf_config(bp, bp->pending_max);
2666 bp->pending_max = 0;
2670 rc = bnx2x_initial_phy_init(bp, load_mode);
2672 LOAD_ERROR_EXIT(bp, load_error3);
2674 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2676 /* Start fast path */
2678 /* Initialize Rx filter. */
2679 netif_addr_lock_bh(bp->dev);
2680 bnx2x_set_rx_mode(bp->dev);
2681 netif_addr_unlock_bh(bp->dev);
2684 switch (load_mode) {
2686 /* Tx queue should be only reenabled */
2687 netif_tx_wake_all_queues(bp->dev);
2691 netif_tx_start_all_queues(bp->dev);
2692 smp_mb__after_clear_bit();
2696 case LOAD_LOOPBACK_EXT:
2697 bp->state = BNX2X_STATE_DIAG;
2705 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2707 bnx2x__link_status_update(bp);
2709 /* start the timer */
2710 mod_timer(&bp->timer, jiffies + bp->current_interval);
2712 if (CNIC_ENABLED(bp))
2713 bnx2x_load_cnic(bp);
2715 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2716 /* mark driver is loaded in shmem2 */
2718 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2719 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2720 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2721 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2724 /* Wait for all pending SP commands to complete */
2725 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2726 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2727 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2731 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2732 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2733 bnx2x_dcbx_init(bp, false);
2735 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2739 #ifndef BNX2X_STOP_ON_ERROR
2742 bnx2x_int_disable_sync(bp, 1);
2744 /* Clean queueable objects */
2745 bnx2x_squeeze_objects(bp);
2748 /* Free SKBs, SGEs, TPA pool and driver internals */
2749 bnx2x_free_skbs(bp);
2750 for_each_rx_queue(bp, i)
2751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2756 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2763 bnx2x_napi_disable(bp);
2764 bnx2x_del_all_napi(bp);
2766 /* clear pf_load status, as it was already set */
2768 bnx2x_clear_pf_load(bp);
2770 bnx2x_free_fp_mem(bp);
2771 bnx2x_free_fw_stats_mem(bp);
2775 #endif /* ! BNX2X_STOP_ON_ERROR */
2778 static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2782 /* Wait until tx fastpath tasks complete */
2783 for_each_tx_queue(bp, i) {
2784 struct bnx2x_fastpath *fp = &bp->fp[i];
2786 for_each_cos_in_tx_queue(fp, cos)
2787 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2794 /* must be called with rtnl_lock */
2795 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2798 bool global = false;
2800 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2802 /* mark driver is unloaded in shmem2 */
2803 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2805 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2806 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2807 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2810 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2811 (bp->state == BNX2X_STATE_CLOSED ||
2812 bp->state == BNX2X_STATE_ERROR)) {
2813 /* We can get here if the driver has been unloaded
2814 * during parity error recovery and is either waiting for a
2815 * leader to complete or for other functions to unload and
2816 * then ifdown has been issued. In this case we want to
2817 * unload and let other functions to complete a recovery
2820 bp->recovery_state = BNX2X_RECOVERY_DONE;
2822 bnx2x_release_leader_lock(bp);
2825 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2826 BNX2X_ERR("Can't unload in closed or error state\n");
2830 /* Nothing to do during unload if previous bnx2x_nic_load()
2831 * have not completed succesfully - all resourses are released.
2833 * we can get here only after unsuccessful ndo_* callback, during which
2834 * dev->IFF_UP flag is still on.
2836 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2839 /* It's important to set the bp->state to the value different from
2840 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2841 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2843 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2846 if (CNIC_LOADED(bp))
2847 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2850 bnx2x_tx_disable(bp);
2851 netdev_reset_tc(bp->dev);
2853 bp->rx_mode = BNX2X_RX_MODE_NONE;
2855 del_timer_sync(&bp->timer);
2858 /* Set ALWAYS_ALIVE bit in shmem */
2859 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2860 bnx2x_drv_pulse(bp);
2861 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2862 bnx2x_save_statistics(bp);
2865 /* wait till consumers catch up with producers in all queues */
2866 bnx2x_drain_tx_queues(bp);
2868 /* if VF indicate to PF this function is going down (PF will delete sp
2869 * elements and clear initializations
2872 bnx2x_vfpf_close_vf(bp);
2873 else if (unload_mode != UNLOAD_RECOVERY)
2874 /* if this is a normal/close unload need to clean up chip*/
2875 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
2877 /* Send the UNLOAD_REQUEST to the MCP */
2878 bnx2x_send_unload_req(bp, unload_mode);
2881 * Prevent transactions to host from the functions on the
2882 * engine that doesn't reset global blocks in case of global
2883 * attention once gloabl blocks are reset and gates are opened
2884 * (the engine which leader will perform the recovery
2887 if (!CHIP_IS_E1x(bp))
2888 bnx2x_pf_disable(bp);
2890 /* Disable HW interrupts, NAPI */
2891 bnx2x_netif_stop(bp, 1);
2892 /* Delete all NAPI objects */
2893 bnx2x_del_all_napi(bp);
2894 if (CNIC_LOADED(bp))
2895 bnx2x_del_all_napi_cnic(bp);
2899 /* Report UNLOAD_DONE to MCP */
2900 bnx2x_send_unload_done(bp, false);
2904 * At this stage no more interrupts will arrive so we may safly clean
2905 * the queueable objects here in case they failed to get cleaned so far.
2908 bnx2x_squeeze_objects(bp);
2910 /* There should be no more pending SP commands at this stage */
2915 /* Free SKBs, SGEs, TPA pool and driver internals */
2916 bnx2x_free_skbs(bp);
2917 if (CNIC_LOADED(bp))
2918 bnx2x_free_skbs_cnic(bp);
2919 for_each_rx_queue(bp, i)
2920 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2922 bnx2x_free_fp_mem(bp);
2923 if (CNIC_LOADED(bp))
2924 bnx2x_free_fp_mem_cnic(bp);
2928 if (CNIC_LOADED(bp))
2929 bnx2x_free_mem_cnic(bp);
2931 bp->state = BNX2X_STATE_CLOSED;
2932 bp->cnic_loaded = false;
2934 /* Check if there are pending parity attentions. If there are - set
2935 * RECOVERY_IN_PROGRESS.
2937 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
2938 bnx2x_set_reset_in_progress(bp);
2940 /* Set RESET_IS_GLOBAL if needed */
2942 bnx2x_set_reset_global(bp);
2946 /* The last driver must disable a "close the gate" if there is no
2947 * parity attention or "process kill" pending.
2950 !bnx2x_clear_pf_load(bp) &&
2951 bnx2x_reset_is_done(bp, BP_PATH(bp)))
2952 bnx2x_disable_close_the_gate(bp);
2954 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2959 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2963 /* If there is no power capability, silently succeed */
2965 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2969 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2973 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2974 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2975 PCI_PM_CTRL_PME_STATUS));
2977 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2978 /* delay required during transition out of D3hot */
2983 /* If there are other clients above don't
2984 shut down the power */
2985 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2987 /* Don't shut down the power for emulation and FPGA */
2988 if (CHIP_REV_IS_SLOW(bp))
2991 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2995 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2997 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3000 /* No more memory access after this point until
3001 * device is brought back to D0.
3006 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3013 * net_device service functions
3015 int bnx2x_poll(struct napi_struct *napi, int budget)
3019 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3021 struct bnx2x *bp = fp->bp;
3024 #ifdef BNX2X_STOP_ON_ERROR
3025 if (unlikely(bp->panic)) {
3026 napi_complete(napi);
3031 for_each_cos_in_tx_queue(fp, cos)
3032 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3033 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3035 if (bnx2x_has_rx_work(fp)) {
3036 work_done += bnx2x_rx_int(fp, budget - work_done);
3038 /* must not complete if we consumed full budget */
3039 if (work_done >= budget)
3043 /* Fall out from the NAPI loop if needed */
3044 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3046 /* No need to update SB for FCoE L2 ring as long as
3047 * it's connected to the default SB and the SB
3048 * has been updated when NAPI was scheduled.
3050 if (IS_FCOE_FP(fp)) {
3051 napi_complete(napi);
3054 bnx2x_update_fpsb_idx(fp);
3055 /* bnx2x_has_rx_work() reads the status block,
3056 * thus we need to ensure that status block indices
3057 * have been actually read (bnx2x_update_fpsb_idx)
3058 * prior to this check (bnx2x_has_rx_work) so that
3059 * we won't write the "newer" value of the status block
3060 * to IGU (if there was a DMA right after
3061 * bnx2x_has_rx_work and if there is no rmb, the memory
3062 * reading (bnx2x_update_fpsb_idx) may be postponed
3063 * to right before bnx2x_ack_sb). In this case there
3064 * will never be another interrupt until there is
3065 * another update of the status block, while there
3066 * is still unhandled work.
3070 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3071 napi_complete(napi);
3072 /* Re-enable interrupts */
3073 DP(NETIF_MSG_RX_STATUS,
3074 "Update index to %d\n", fp->fp_hc_idx);
3075 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3076 le16_to_cpu(fp->fp_hc_idx),
3086 /* we split the first BD into headers and data BDs
3087 * to ease the pain of our fellow microcode engineers
3088 * we use one mapping for both BDs
3090 static u16 bnx2x_tx_split(struct bnx2x *bp,
3091 struct bnx2x_fp_txdata *txdata,
3092 struct sw_tx_bd *tx_buf,
3093 struct eth_tx_start_bd **tx_bd, u16 hlen,
3096 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3097 struct eth_tx_bd *d_tx_bd;
3099 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3101 /* first fix first BD */
3102 h_tx_bd->nbytes = cpu_to_le16(hlen);
3104 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3105 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3107 /* now get a new data BD
3108 * (after the pbd) and fill it */
3109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3110 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3112 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3113 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3115 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3116 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3117 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3119 /* this marks the BD as one that has no individual mapping */
3120 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3122 DP(NETIF_MSG_TX_QUEUED,
3123 "TSO split data size is %d (%x:%x)\n",
3124 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3127 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3132 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3133 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3134 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3136 __sum16 tsum = (__force __sum16) csum;
3139 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3140 csum_partial(t_header - fix, fix, 0)));
3143 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3144 csum_partial(t_header, -fix, 0)));
3146 return bswab16(tsum);
3149 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3155 if (skb->ip_summed != CHECKSUM_PARTIAL)
3158 protocol = vlan_get_protocol(skb);
3159 if (protocol == htons(ETH_P_IPV6)) {
3161 prot = ipv6_hdr(skb)->nexthdr;
3164 prot = ip_hdr(skb)->protocol;
3167 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3168 if (inner_ip_hdr(skb)->version == 6) {
3169 rc |= XMIT_CSUM_ENC_V6;
3170 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3171 rc |= XMIT_CSUM_TCP;
3173 rc |= XMIT_CSUM_ENC_V4;
3174 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3175 rc |= XMIT_CSUM_TCP;
3178 if (prot == IPPROTO_TCP)
3179 rc |= XMIT_CSUM_TCP;
3181 if (skb_is_gso_v6(skb)) {
3182 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
3183 if (rc & XMIT_CSUM_ENC)
3184 rc |= XMIT_GSO_ENC_V6;
3185 } else if (skb_is_gso(skb)) {
3186 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
3187 if (rc & XMIT_CSUM_ENC)
3188 rc |= XMIT_GSO_ENC_V4;
3194 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3195 /* check if packet requires linearization (packet is too fragmented)
3196 no need to check fragmentation if page size > 8K (there will be no
3197 violation to FW restrictions) */
3198 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3203 int first_bd_sz = 0;
3205 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3206 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3208 if (xmit_type & XMIT_GSO) {
3209 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3210 /* Check if LSO packet needs to be copied:
3211 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3212 int wnd_size = MAX_FETCH_BD - 3;
3213 /* Number of windows to check */
3214 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3219 /* Headers length */
3220 hlen = (int)(skb_transport_header(skb) - skb->data) +
3223 /* Amount of data (w/o headers) on linear part of SKB*/
3224 first_bd_sz = skb_headlen(skb) - hlen;
3226 wnd_sum = first_bd_sz;
3228 /* Calculate the first sum - it's special */
3229 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3231 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3233 /* If there was data on linear skb data - check it */
3234 if (first_bd_sz > 0) {
3235 if (unlikely(wnd_sum < lso_mss)) {
3240 wnd_sum -= first_bd_sz;
3243 /* Others are easier: run through the frag list and
3244 check all windows */
3245 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3247 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3249 if (unlikely(wnd_sum < lso_mss)) {
3254 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3257 /* in non-LSO too fragmented packet should always
3264 if (unlikely(to_copy))
3265 DP(NETIF_MSG_TX_QUEUED,
3266 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3267 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3268 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3274 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3277 struct ipv6hdr *ipv6;
3279 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3280 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3281 ETH_TX_PARSE_BD_E2_LSO_MSS;
3283 if (xmit_type & XMIT_GSO_ENC_V6)
3284 ipv6 = inner_ipv6_hdr(skb);
3285 else if (xmit_type & XMIT_GSO_V6)
3286 ipv6 = ipv6_hdr(skb);
3290 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3291 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3295 * bnx2x_set_pbd_gso - update PBD in GSO case.
3299 * @xmit_type: xmit flags
3301 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3302 struct eth_tx_parse_bd_e1x *pbd,
3305 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3306 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3307 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3309 if (xmit_type & XMIT_GSO_V4) {
3310 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3311 pbd->tcp_pseudo_csum =
3312 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3314 0, IPPROTO_TCP, 0));
3317 pbd->tcp_pseudo_csum =
3318 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3319 &ipv6_hdr(skb)->daddr,
3320 0, IPPROTO_TCP, 0));
3323 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3327 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3329 * @bp: driver handle
3331 * @parsing_data: data to be updated
3332 * @xmit_type: xmit flags
3334 * 57712/578xx related, when skb has encapsulation
3336 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3337 u32 *parsing_data, u32 xmit_type)
3340 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3341 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3342 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3344 if (xmit_type & XMIT_CSUM_TCP) {
3345 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3346 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3347 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3349 return skb_inner_transport_header(skb) +
3350 inner_tcp_hdrlen(skb) - skb->data;
3353 /* We support checksum offload for TCP and UDP only.
3354 * No need to pass the UDP header length - it's a constant.
3356 return skb_inner_transport_header(skb) +
3357 sizeof(struct udphdr) - skb->data;
3361 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3363 * @bp: driver handle
3365 * @parsing_data: data to be updated
3366 * @xmit_type: xmit flags
3368 * 57712/578xx related
3370 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3371 u32 *parsing_data, u32 xmit_type)
3374 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3375 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3376 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3378 if (xmit_type & XMIT_CSUM_TCP) {
3379 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3380 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3381 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3383 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3385 /* We support checksum offload for TCP and UDP only.
3386 * No need to pass the UDP header length - it's a constant.
3388 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3391 /* set FW indication according to inner or outer protocols if tunneled */
3392 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3393 struct eth_tx_start_bd *tx_start_bd,
3396 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3398 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3399 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3401 if (!(xmit_type & XMIT_CSUM_TCP))
3402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3406 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3408 * @bp: driver handle
3410 * @pbd: parse BD to be updated
3411 * @xmit_type: xmit flags
3413 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3414 struct eth_tx_parse_bd_e1x *pbd,
3417 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3419 /* for now NS flag is not used in Linux */
3422 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3423 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3425 pbd->ip_hlen_w = (skb_transport_header(skb) -
3426 skb_network_header(skb)) >> 1;
3428 hlen += pbd->ip_hlen_w;
3430 /* We support checksum offload for TCP and UDP only */
3431 if (xmit_type & XMIT_CSUM_TCP)
3432 hlen += tcp_hdrlen(skb) / 2;
3434 hlen += sizeof(struct udphdr) / 2;
3436 pbd->total_hlen_w = cpu_to_le16(hlen);
3439 if (xmit_type & XMIT_CSUM_TCP) {
3440 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3443 s8 fix = SKB_CS_OFF(skb); /* signed! */
3445 DP(NETIF_MSG_TX_QUEUED,
3446 "hlen %d fix %d csum before fix %x\n",
3447 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3449 /* HW bug: fixup the CSUM */
3450 pbd->tcp_pseudo_csum =
3451 bnx2x_csum_fix(skb_transport_header(skb),
3454 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3455 pbd->tcp_pseudo_csum);
3461 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3462 struct eth_tx_parse_bd_e2 *pbd_e2,
3463 struct eth_tx_parse_2nd_bd *pbd2,
3467 u16 inner_hlen_w = 0;
3468 u8 outerip_off, outerip_len = 0;
3471 inner_hlen_w = (skb_inner_transport_header(skb) -
3472 skb_inner_network_header(skb)) >> 1;
3475 if (xmit_type & XMIT_CSUM_TCP)
3476 inner_hlen_w += inner_tcp_hdrlen(skb) >> 1;
3478 inner_hlen_w += sizeof(struct udphdr) >> 1;
3480 pbd2->fw_ip_hdr_to_payload_w = inner_hlen_w;
3482 if (xmit_type & XMIT_CSUM_ENC_V4) {
3483 struct iphdr *iph = inner_ip_hdr(skb);
3485 pbd2->fw_ip_csum_wo_len_flags_frag =
3486 bswab16(csum_fold((~iph->check) -
3487 iph->tot_len - iph->frag_off));
3489 pbd2->fw_ip_hdr_to_payload_w =
3490 inner_hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3493 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3495 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3497 if (xmit_type & XMIT_GSO_V4) {
3498 pbd2->hw_ip_id = bswab16(ip_hdr(skb)->id);
3500 pbd_e2->data.tunnel_data.pseudo_csum =
3501 bswab16(~csum_tcpudp_magic(
3502 inner_ip_hdr(skb)->saddr,
3503 inner_ip_hdr(skb)->daddr,
3504 0, IPPROTO_TCP, 0));
3506 outerip_len = ip_hdr(skb)->ihl << 1;
3508 pbd_e2->data.tunnel_data.pseudo_csum =
3509 bswab16(~csum_ipv6_magic(
3510 &inner_ipv6_hdr(skb)->saddr,
3511 &inner_ipv6_hdr(skb)->daddr,
3512 0, IPPROTO_TCP, 0));
3515 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3519 (!!(xmit_type & XMIT_CSUM_V6) <<
3520 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3522 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3523 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3524 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3527 /* called with netif_tx_lock
3528 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3529 * netif_wake_queue()
3531 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3533 struct bnx2x *bp = netdev_priv(dev);
3535 struct netdev_queue *txq;
3536 struct bnx2x_fp_txdata *txdata;
3537 struct sw_tx_bd *tx_buf;
3538 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3539 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3540 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3541 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3542 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3543 u32 pbd_e2_parsing_data = 0;
3544 u16 pkt_prod, bd_prod;
3547 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3550 __le16 pkt_size = 0;
3552 u8 mac_type = UNICAST_ADDRESS;
3554 #ifdef BNX2X_STOP_ON_ERROR
3555 if (unlikely(bp->panic))
3556 return NETDEV_TX_BUSY;
3559 txq_index = skb_get_queue_mapping(skb);
3560 txq = netdev_get_tx_queue(dev, txq_index);
3562 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3564 txdata = &bp->bnx2x_txq[txq_index];
3566 /* enable this debug print to view the transmission queue being used
3567 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3568 txq_index, fp_index, txdata_index); */
3570 /* enable this debug print to view the tranmission details
3571 DP(NETIF_MSG_TX_QUEUED,
3572 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3573 txdata->cid, fp_index, txdata_index, txdata, fp); */
3575 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3576 skb_shinfo(skb)->nr_frags +
3578 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3579 /* Handle special storage cases separately */
3580 if (txdata->tx_ring_size == 0) {
3581 struct bnx2x_eth_q_stats *q_stats =
3582 bnx2x_fp_qstats(bp, txdata->parent_fp);
3583 q_stats->driver_filtered_tx_pkt++;
3585 return NETDEV_TX_OK;
3587 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3588 netif_tx_stop_queue(txq);
3589 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3591 return NETDEV_TX_BUSY;
3594 DP(NETIF_MSG_TX_QUEUED,
3595 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3596 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3597 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3600 eth = (struct ethhdr *)skb->data;
3602 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3603 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3604 if (is_broadcast_ether_addr(eth->h_dest))
3605 mac_type = BROADCAST_ADDRESS;
3607 mac_type = MULTICAST_ADDRESS;
3610 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3611 /* First, check if we need to linearize the skb (due to FW
3612 restrictions). No need to check fragmentation if page size > 8K
3613 (there will be no violation to FW restrictions) */
3614 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3615 /* Statistics of linearization */
3617 if (skb_linearize(skb) != 0) {
3618 DP(NETIF_MSG_TX_QUEUED,
3619 "SKB linearization failed - silently dropping this SKB\n");
3620 dev_kfree_skb_any(skb);
3621 return NETDEV_TX_OK;
3625 /* Map skb linear data for DMA */
3626 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3627 skb_headlen(skb), DMA_TO_DEVICE);
3628 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3629 DP(NETIF_MSG_TX_QUEUED,
3630 "SKB mapping failed - silently dropping this SKB\n");
3631 dev_kfree_skb_any(skb);
3632 return NETDEV_TX_OK;
3635 Please read carefully. First we use one BD which we mark as start,
3636 then we have a parsing info BD (used for TSO or xsum),
3637 and only then we have the rest of the TSO BDs.
3638 (don't forget to mark the last one as last,
3639 and to unmap only AFTER you write to the BD ...)
3640 And above all, all pdb sizes are in words - NOT DWORDS!
3643 /* get current pkt produced now - advance it just before sending packet
3644 * since mapping of pages may fail and cause packet to be dropped
3646 pkt_prod = txdata->tx_pkt_prod;
3647 bd_prod = TX_BD(txdata->tx_bd_prod);
3649 /* get a tx_buf and first BD
3650 * tx_start_bd may be changed during SPLIT,
3651 * but first_bd will always stay first
3653 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3654 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3655 first_bd = tx_start_bd;
3657 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3659 /* header nbd: indirectly zero other flags! */
3660 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3662 /* remember the first BD of the packet */
3663 tx_buf->first_bd = txdata->tx_bd_prod;
3667 DP(NETIF_MSG_TX_QUEUED,
3668 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3669 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3671 if (vlan_tx_tag_present(skb)) {
3672 tx_start_bd->vlan_or_ethertype =
3673 cpu_to_le16(vlan_tx_tag_get(skb));
3674 tx_start_bd->bd_flags.as_bitfield |=
3675 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3677 /* when transmitting in a vf, start bd must hold the ethertype
3678 * for fw to enforce it
3681 tx_start_bd->vlan_or_ethertype =
3682 cpu_to_le16(ntohs(eth->h_proto));
3684 /* used by FW for packet accounting */
3685 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3688 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3690 /* turn on parsing and get a BD */
3691 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3693 if (xmit_type & XMIT_CSUM)
3694 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3696 if (!CHIP_IS_E1x(bp)) {
3697 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3698 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3700 if (xmit_type & XMIT_CSUM_ENC) {
3701 u16 global_data = 0;
3703 /* Set PBD in enc checksum offload case */
3704 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3705 &pbd_e2_parsing_data,
3708 /* turn on 2nd parsing and get a BD */
3709 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3711 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3713 memset(pbd2, 0, sizeof(*pbd2));
3715 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3716 (skb_inner_network_header(skb) -
3719 if (xmit_type & XMIT_GSO_ENC)
3720 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3724 pbd2->global_data = cpu_to_le16(global_data);
3726 /* add addition parse BD indication to start BD */
3727 SET_FLAG(tx_start_bd->general_data,
3728 ETH_TX_START_BD_PARSE_NBDS, 1);
3729 /* set encapsulation flag in start BD */
3730 SET_FLAG(tx_start_bd->general_data,
3731 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3733 } else if (xmit_type & XMIT_CSUM) {
3734 /* Set PBD in checksum offload case w/o encapsulation */
3735 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3736 &pbd_e2_parsing_data,
3740 /* Add the macs to the parsing BD this is a vf */
3742 /* override GRE parameters in BD */
3743 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3744 &pbd_e2->data.mac_addr.src_mid,
3745 &pbd_e2->data.mac_addr.src_lo,
3748 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3749 &pbd_e2->data.mac_addr.dst_mid,
3750 &pbd_e2->data.mac_addr.dst_lo,
3754 SET_FLAG(pbd_e2_parsing_data,
3755 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3757 u16 global_data = 0;
3758 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3759 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3760 /* Set PBD in checksum offload case */
3761 if (xmit_type & XMIT_CSUM)
3762 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3764 SET_FLAG(global_data,
3765 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3766 pbd_e1x->global_data |= cpu_to_le16(global_data);
3769 /* Setup the data pointer of the first BD of the packet */
3770 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3771 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3772 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3773 pkt_size = tx_start_bd->nbytes;
3775 DP(NETIF_MSG_TX_QUEUED,
3776 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3777 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3778 le16_to_cpu(tx_start_bd->nbytes),
3779 tx_start_bd->bd_flags.as_bitfield,
3780 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3782 if (xmit_type & XMIT_GSO) {
3784 DP(NETIF_MSG_TX_QUEUED,
3785 "TSO packet len %d hlen %d total len %d tso size %d\n",
3786 skb->len, hlen, skb_headlen(skb),
3787 skb_shinfo(skb)->gso_size);
3789 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3791 if (unlikely(skb_headlen(skb) > hlen)) {
3793 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3797 if (!CHIP_IS_E1x(bp))
3798 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3801 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3804 /* Set the PBD's parsing_data field if not zero
3805 * (for the chips newer than 57711).
3807 if (pbd_e2_parsing_data)
3808 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3810 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3812 /* Handle fragmented skb */
3813 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3814 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3816 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3817 skb_frag_size(frag), DMA_TO_DEVICE);
3818 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3819 unsigned int pkts_compl = 0, bytes_compl = 0;
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "Unable to map page - dropping packet...\n");
3824 /* we need unmap all buffers already mapped
3826 * first_bd->nbd need to be properly updated
3827 * before call to bnx2x_free_tx_pkt
3829 first_bd->nbd = cpu_to_le16(nbd);
3830 bnx2x_free_tx_pkt(bp, txdata,
3831 TX_BD(txdata->tx_pkt_prod),
3832 &pkts_compl, &bytes_compl);
3833 return NETDEV_TX_OK;
3836 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3837 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3838 if (total_pkt_bd == NULL)
3839 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3841 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3842 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3843 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3844 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3847 DP(NETIF_MSG_TX_QUEUED,
3848 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3849 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3850 le16_to_cpu(tx_data_bd->nbytes));
3853 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3855 /* update with actual num BDs */
3856 first_bd->nbd = cpu_to_le16(nbd);
3858 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3860 /* now send a tx doorbell, counting the next BD
3861 * if the packet contains or ends with it
3863 if (TX_BD_POFF(bd_prod) < nbd)
3866 /* total_pkt_bytes should be set on the first data BD if
3867 * it's not an LSO packet and there is more than one
3868 * data BD. In this case pkt_size is limited by an MTU value.
3869 * However we prefer to set it for an LSO packet (while we don't
3870 * have to) in order to save some CPU cycles in a none-LSO
3871 * case, when we much more care about them.
3873 if (total_pkt_bd != NULL)
3874 total_pkt_bd->total_pkt_bytes = pkt_size;
3877 DP(NETIF_MSG_TX_QUEUED,
3878 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3879 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3880 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3881 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3882 le16_to_cpu(pbd_e1x->total_hlen_w));
3884 DP(NETIF_MSG_TX_QUEUED,
3885 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3887 pbd_e2->data.mac_addr.dst_hi,
3888 pbd_e2->data.mac_addr.dst_mid,
3889 pbd_e2->data.mac_addr.dst_lo,
3890 pbd_e2->data.mac_addr.src_hi,
3891 pbd_e2->data.mac_addr.src_mid,
3892 pbd_e2->data.mac_addr.src_lo,
3893 pbd_e2->parsing_data);
3894 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3896 netdev_tx_sent_queue(txq, skb->len);
3898 skb_tx_timestamp(skb);
3900 txdata->tx_pkt_prod++;
3902 * Make sure that the BD data is updated before updating the producer
3903 * since FW might read the BD right after the producer is updated.
3904 * This is only applicable for weak-ordered memory model archs such
3905 * as IA-64. The following barrier is also mandatory since FW will
3906 * assumes packets must have BDs.
3910 txdata->tx_db.data.prod += nbd;
3913 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3917 txdata->tx_bd_prod += nbd;
3919 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
3920 netif_tx_stop_queue(txq);
3922 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3923 * ordering of set_bit() in netif_tx_stop_queue() and read of
3927 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3928 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
3929 netif_tx_wake_queue(txq);
3933 return NETDEV_TX_OK;
3937 * bnx2x_setup_tc - routine to configure net_device for multi tc
3939 * @netdev: net device to configure
3940 * @tc: number of traffic classes to enable
3942 * callback connected to the ndo_setup_tc function pointer
3944 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3946 int cos, prio, count, offset;
3947 struct bnx2x *bp = netdev_priv(dev);
3949 /* setup tc must be called under rtnl lock */
3952 /* no traffic classes requested. aborting */
3954 netdev_reset_tc(dev);
3958 /* requested to support too many traffic classes */
3959 if (num_tc > bp->max_cos) {
3960 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3961 num_tc, bp->max_cos);
3965 /* declare amount of supported traffic classes */
3966 if (netdev_set_num_tc(dev, num_tc)) {
3967 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3971 /* configure priority to traffic class mapping */
3972 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3973 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3974 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3975 "mapping priority %d to tc %d\n",
3976 prio, bp->prio_to_cos[prio]);
3980 /* Use this configuration to diffrentiate tc0 from other COSes
3981 This can be used for ets or pfc, and save the effort of setting
3982 up a multio class queue disc or negotiating DCBX with a switch
3983 netdev_set_prio_tc_map(dev, 0, 0);
3984 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
3985 for (prio = 1; prio < 16; prio++) {
3986 netdev_set_prio_tc_map(dev, prio, 1);
3987 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
3990 /* configure traffic class to transmission queue mapping */
3991 for (cos = 0; cos < bp->max_cos; cos++) {
3992 count = BNX2X_NUM_ETH_QUEUES(bp);
3993 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
3994 netdev_set_tc_queue(dev, cos, count, offset);
3995 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3996 "mapping tc %d to offset %d count %d\n",
3997 cos, offset, count);
4003 /* called with rtnl_lock */
4004 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4006 struct sockaddr *addr = p;
4007 struct bnx2x *bp = netdev_priv(dev);
4010 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4011 BNX2X_ERR("Requested MAC address is not valid\n");
4015 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4016 !is_zero_ether_addr(addr->sa_data)) {
4017 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4021 if (netif_running(dev)) {
4022 rc = bnx2x_set_eth_mac(bp, false);
4027 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4029 if (netif_running(dev))
4030 rc = bnx2x_set_eth_mac(bp, true);
4035 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4037 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4038 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4043 if (IS_FCOE_IDX(fp_index)) {
4044 memset(sb, 0, sizeof(union host_hc_status_block));
4045 fp->status_blk_mapping = 0;
4048 if (!CHIP_IS_E1x(bp))
4049 BNX2X_PCI_FREE(sb->e2_sb,
4050 bnx2x_fp(bp, fp_index,
4051 status_blk_mapping),
4052 sizeof(struct host_hc_status_block_e2));
4054 BNX2X_PCI_FREE(sb->e1x_sb,
4055 bnx2x_fp(bp, fp_index,
4056 status_blk_mapping),
4057 sizeof(struct host_hc_status_block_e1x));
4061 if (!skip_rx_queue(bp, fp_index)) {
4062 bnx2x_free_rx_bds(fp);
4064 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4065 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4066 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4067 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4068 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4070 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4071 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4072 sizeof(struct eth_fast_path_rx_cqe) *
4076 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4077 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4078 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4079 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4083 if (!skip_tx_queue(bp, fp_index)) {
4084 /* fastpath tx rings: tx_buf tx_desc */
4085 for_each_cos_in_tx_queue(fp, cos) {
4086 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4088 DP(NETIF_MSG_IFDOWN,
4089 "freeing tx memory of fp %d cos %d cid %d\n",
4090 fp_index, cos, txdata->cid);
4092 BNX2X_FREE(txdata->tx_buf_ring);
4093 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4094 txdata->tx_desc_mapping,
4095 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4098 /* end of fastpath */
4101 void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4104 for_each_cnic_queue(bp, i)
4105 bnx2x_free_fp_mem_at(bp, i);
4108 void bnx2x_free_fp_mem(struct bnx2x *bp)
4111 for_each_eth_queue(bp, i)
4112 bnx2x_free_fp_mem_at(bp, i);
4115 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4117 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4118 if (!CHIP_IS_E1x(bp)) {
4119 bnx2x_fp(bp, index, sb_index_values) =
4120 (__le16 *)status_blk.e2_sb->sb.index_values;
4121 bnx2x_fp(bp, index, sb_running_index) =
4122 (__le16 *)status_blk.e2_sb->sb.running_index;
4124 bnx2x_fp(bp, index, sb_index_values) =
4125 (__le16 *)status_blk.e1x_sb->sb.index_values;
4126 bnx2x_fp(bp, index, sb_running_index) =
4127 (__le16 *)status_blk.e1x_sb->sb.running_index;
4131 /* Returns the number of actually allocated BDs */
4132 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4135 struct bnx2x *bp = fp->bp;
4136 u16 ring_prod, cqe_ring_prod;
4137 int i, failure_cnt = 0;
4139 fp->rx_comp_cons = 0;
4140 cqe_ring_prod = ring_prod = 0;
4142 /* This routine is called only during fo init so
4143 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4145 for (i = 0; i < rx_ring_size; i++) {
4146 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
4150 ring_prod = NEXT_RX_IDX(ring_prod);
4151 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4152 WARN_ON(ring_prod <= (i - failure_cnt));
4156 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4157 i - failure_cnt, fp->index);
4159 fp->rx_bd_prod = ring_prod;
4160 /* Limit the CQE producer by the CQE ring size */
4161 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4163 fp->rx_pkt = fp->rx_calls = 0;
4165 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4167 return i - failure_cnt;
4170 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4174 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4175 struct eth_rx_cqe_next_page *nextpg;
4177 nextpg = (struct eth_rx_cqe_next_page *)
4178 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4180 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4181 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4183 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4184 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4188 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4190 union host_hc_status_block *sb;
4191 struct bnx2x_fastpath *fp = &bp->fp[index];
4194 int rx_ring_size = 0;
4196 if (!bp->rx_ring_size &&
4197 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4198 rx_ring_size = MIN_RX_SIZE_NONTPA;
4199 bp->rx_ring_size = rx_ring_size;
4200 } else if (!bp->rx_ring_size) {
4201 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4203 if (CHIP_IS_E3(bp)) {
4204 u32 cfg = SHMEM_RD(bp,
4205 dev_info.port_hw_config[BP_PORT(bp)].
4208 /* Decrease ring size for 1G functions */
4209 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4210 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4214 /* allocate at least number of buffers required by FW */
4215 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4216 MIN_RX_SIZE_TPA, rx_ring_size);
4218 bp->rx_ring_size = rx_ring_size;
4219 } else /* if rx_ring_size specified - use it */
4220 rx_ring_size = bp->rx_ring_size;
4222 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4225 sb = &bnx2x_fp(bp, index, status_blk);
4227 if (!IS_FCOE_IDX(index)) {
4229 if (!CHIP_IS_E1x(bp))
4230 BNX2X_PCI_ALLOC(sb->e2_sb,
4231 &bnx2x_fp(bp, index, status_blk_mapping),
4232 sizeof(struct host_hc_status_block_e2));
4234 BNX2X_PCI_ALLOC(sb->e1x_sb,
4235 &bnx2x_fp(bp, index, status_blk_mapping),
4236 sizeof(struct host_hc_status_block_e1x));
4239 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4240 * set shortcuts for it.
4242 if (!IS_FCOE_IDX(index))
4243 set_sb_shortcuts(bp, index);
4246 if (!skip_tx_queue(bp, index)) {
4247 /* fastpath tx rings: tx_buf tx_desc */
4248 for_each_cos_in_tx_queue(fp, cos) {
4249 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4252 "allocating tx memory of fp %d cos %d\n",
4255 BNX2X_ALLOC(txdata->tx_buf_ring,
4256 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4257 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4258 &txdata->tx_desc_mapping,
4259 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4264 if (!skip_rx_queue(bp, index)) {
4265 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4266 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4267 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4268 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4269 &bnx2x_fp(bp, index, rx_desc_mapping),
4270 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4272 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4273 &bnx2x_fp(bp, index, rx_comp_mapping),
4274 sizeof(struct eth_fast_path_rx_cqe) *
4278 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4279 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4281 &bnx2x_fp(bp, index, rx_sge_mapping),
4282 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4284 bnx2x_set_next_page_rx_bd(fp);
4287 bnx2x_set_next_page_rx_cq(fp);
4290 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4291 if (ring_size < rx_ring_size)
4297 /* handles low memory cases */
4299 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4301 /* FW will drop all packets if queue is not big enough,
4302 * In these cases we disable the queue
4303 * Min size is different for OOO, TPA and non-TPA queues
4305 if (ring_size < (fp->disable_tpa ?
4306 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4307 /* release memory allocated for this queue */
4308 bnx2x_free_fp_mem_at(bp, index);
4314 int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4318 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4319 /* we will fail load process instead of mark
4327 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4331 /* 1. Allocate FP for leading - fatal if error
4332 * 2. Allocate RSS - fix number of queues if error
4336 if (bnx2x_alloc_fp_mem_at(bp, 0))
4340 for_each_nondefault_eth_queue(bp, i)
4341 if (bnx2x_alloc_fp_mem_at(bp, i))
4344 /* handle memory failures */
4345 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4346 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4349 bnx2x_shrink_eth_fp(bp, delta);
4350 if (CNIC_SUPPORT(bp))
4351 /* move non eth FPs next to last eth FP
4352 * must be done in that order
4353 * FCOE_IDX < FWD_IDX < OOO_IDX
4356 /* move FCoE fp even NO_FCOE_FLAG is on */
4357 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4358 bp->num_ethernet_queues -= delta;
4359 bp->num_queues = bp->num_ethernet_queues +
4360 bp->num_cnic_queues;
4361 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4362 bp->num_queues + delta, bp->num_queues);
4368 void bnx2x_free_mem_bp(struct bnx2x *bp)
4372 for (i = 0; i < bp->fp_array_size; i++)
4373 kfree(bp->fp[i].tpa_info);
4376 kfree(bp->fp_stats);
4377 kfree(bp->bnx2x_txq);
4378 kfree(bp->msix_table);
4382 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4384 struct bnx2x_fastpath *fp;
4385 struct msix_entry *tbl;
4386 struct bnx2x_ilt *ilt;
4387 int msix_table_size = 0;
4388 int fp_array_size, txq_array_size;
4392 * The biggest MSI-X table we might need is as a maximum number of fast
4393 * path IGU SBs plus default SB (for PF only).
4395 msix_table_size = bp->igu_sb_cnt;
4398 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4400 /* fp array: RSS plus CNIC related L2 queues */
4401 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4402 bp->fp_array_size = fp_array_size;
4403 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4405 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4408 for (i = 0; i < bp->fp_array_size; i++) {
4410 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4411 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4412 if (!(fp[i].tpa_info))
4418 /* allocate sp objs */
4419 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4424 /* allocate fp_stats */
4425 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4430 /* Allocate memory for the transmission queues array */
4432 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4433 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4435 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4441 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4444 bp->msix_table = tbl;
4447 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4454 bnx2x_free_mem_bp(bp);
4459 int bnx2x_reload_if_running(struct net_device *dev)
4461 struct bnx2x *bp = netdev_priv(dev);
4463 if (unlikely(!netif_running(dev)))
4466 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4467 return bnx2x_nic_load(bp, LOAD_NORMAL);
4470 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4472 u32 sel_phy_idx = 0;
4473 if (bp->link_params.num_phys <= 1)
4476 if (bp->link_vars.link_up) {
4477 sel_phy_idx = EXT_PHY1;
4478 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4479 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4480 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4481 sel_phy_idx = EXT_PHY2;
4484 switch (bnx2x_phy_selection(&bp->link_params)) {
4485 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4486 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4487 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4488 sel_phy_idx = EXT_PHY1;
4490 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4491 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4492 sel_phy_idx = EXT_PHY2;
4500 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4502 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4504 * The selected activated PHY is always after swapping (in case PHY
4505 * swapping is enabled). So when swapping is enabled, we need to reverse
4509 if (bp->link_params.multi_phy_config &
4510 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4511 if (sel_phy_idx == EXT_PHY1)
4512 sel_phy_idx = EXT_PHY2;
4513 else if (sel_phy_idx == EXT_PHY2)
4514 sel_phy_idx = EXT_PHY1;
4516 return LINK_CONFIG_IDX(sel_phy_idx);
4519 #ifdef NETDEV_FCOE_WWNN
4520 int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4522 struct bnx2x *bp = netdev_priv(dev);
4523 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4526 case NETDEV_FCOE_WWNN:
4527 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4528 cp->fcoe_wwn_node_name_lo);
4530 case NETDEV_FCOE_WWPN:
4531 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4532 cp->fcoe_wwn_port_name_lo);
4535 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4543 /* called with rtnl_lock */
4544 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4546 struct bnx2x *bp = netdev_priv(dev);
4548 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4549 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4553 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4554 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4555 BNX2X_ERR("Can't support requested MTU size\n");
4559 /* This does not race with packet allocation
4560 * because the actual alloc size is
4561 * only updated as part of load
4565 return bnx2x_reload_if_running(dev);
4568 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4569 netdev_features_t features)
4571 struct bnx2x *bp = netdev_priv(dev);
4573 /* TPA requires Rx CSUM offloading */
4574 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4575 features &= ~NETIF_F_LRO;
4576 features &= ~NETIF_F_GRO;
4582 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4584 struct bnx2x *bp = netdev_priv(dev);
4585 u32 flags = bp->flags;
4586 bool bnx2x_reload = false;
4588 if (features & NETIF_F_LRO)
4589 flags |= TPA_ENABLE_FLAG;
4591 flags &= ~TPA_ENABLE_FLAG;
4593 if (features & NETIF_F_GRO)
4594 flags |= GRO_ENABLE_FLAG;
4596 flags &= ~GRO_ENABLE_FLAG;
4598 if (features & NETIF_F_LOOPBACK) {
4599 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4600 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4601 bnx2x_reload = true;
4604 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4605 bp->link_params.loopback_mode = LOOPBACK_NONE;
4606 bnx2x_reload = true;
4610 if (flags ^ bp->flags) {
4612 bnx2x_reload = true;
4616 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4617 return bnx2x_reload_if_running(dev);
4618 /* else: bnx2x_nic_load() will be called at end of recovery */
4624 void bnx2x_tx_timeout(struct net_device *dev)
4626 struct bnx2x *bp = netdev_priv(dev);
4628 #ifdef BNX2X_STOP_ON_ERROR
4633 smp_mb__before_clear_bit();
4634 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4635 smp_mb__after_clear_bit();
4637 /* This allows the netif to be shutdown gracefully before resetting */
4638 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4641 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4643 struct net_device *dev = pci_get_drvdata(pdev);
4647 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4650 bp = netdev_priv(dev);
4654 pci_save_state(pdev);
4656 if (!netif_running(dev)) {
4661 netif_device_detach(dev);
4663 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4665 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4672 int bnx2x_resume(struct pci_dev *pdev)
4674 struct net_device *dev = pci_get_drvdata(pdev);
4679 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4682 bp = netdev_priv(dev);
4684 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4685 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4691 pci_restore_state(pdev);
4693 if (!netif_running(dev)) {
4698 bnx2x_set_power_state(bp, PCI_D0);
4699 netif_device_attach(dev);
4701 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4709 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4712 /* ustorm cxt validation */
4713 cxt->ustorm_ag_context.cdu_usage =
4714 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4715 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4716 /* xcontext validation */
4717 cxt->xstorm_ag_context.cdu_reserved =
4718 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4719 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4722 static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4723 u8 fw_sb_id, u8 sb_index,
4727 u32 addr = BAR_CSTRORM_INTMEM +
4728 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4729 REG_WR8(bp, addr, ticks);
4731 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4732 port, fw_sb_id, sb_index, ticks);
4735 static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4736 u16 fw_sb_id, u8 sb_index,
4739 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4740 u32 addr = BAR_CSTRORM_INTMEM +
4741 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4742 u16 flags = REG_RD16(bp, addr);
4744 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4745 flags |= enable_flag;
4746 REG_WR16(bp, addr, flags);
4748 "port %x fw_sb_id %d sb_index %d disable %d\n",
4749 port, fw_sb_id, sb_index, disable);
4752 void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4753 u8 sb_index, u8 disable, u16 usec)
4755 int port = BP_PORT(bp);
4756 u8 ticks = usec / BNX2X_BTR;
4758 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4760 disable = disable ? 1 : (usec ? 0 : 1);
4761 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);