bnx2x: allocate memory dynamically in ethtool self-test.
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (linux-2.6-block.git)
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
85b26ea1 3 * Copyright (c) 2007-2012 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
f2e0899f 24#include <net/ipv6.h>
7f3e01fe 25#include <net/ip6_checksum.h>
6891dd25 26#include <linux/firmware.h>
c0cba59e 27#include <linux/prefetch.h>
9f6c9258 28#include "bnx2x_cmn.h"
523224a3 29#include "bnx2x_init.h"
042181f5 30#include "bnx2x_sp.h"
523224a3 31
619c5cb6 32
9f6c9258 33
b3b83c3f
DK
34/**
35 * bnx2x_move_fp - move content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @from: source FP index
39 * @to: destination FP index
40 *
 41 * Makes sure the contents of bp->fp[to].napi are kept
72754080
AE
42 * intact. This is done by first copying the napi struct from
43 * the target to the source, and then mem copying the entire
44 * source onto the target
b3b83c3f
DK
45 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
72754080
AE
50
51 /* Copy the NAPI object as it has been already initialized */
52 from_fp->napi = to_fp->napi;
53
b3b83c3f
DK
54 /* Move bnx2x_fastpath contents */
55 memcpy(to_fp, from_fp, sizeof(*to_fp));
56 to_fp->index = to;
b3b83c3f
DK
57}
58
619c5cb6
VZ
59int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
60
9f6c9258
DK
61/* free skb in the packet ring at pos idx
62 * return idx of last bd freed
63 */
6383c0b3 64static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
65 u16 idx, unsigned int *pkts_compl,
66 unsigned int *bytes_compl)
9f6c9258 67{
6383c0b3 68 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
69 struct eth_tx_start_bd *tx_start_bd;
70 struct eth_tx_bd *tx_data_bd;
71 struct sk_buff *skb = tx_buf->skb;
72 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
73 int nbd;
74
75 /* prefetch skb end pointer to speedup dev_kfree_skb() */
76 prefetch(&skb->end);
77
619c5cb6 78 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 79 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
80
81 /* unmap first bd */
82 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
6383c0b3 83 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 84 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 85 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 86
619c5cb6 87
9f6c9258
DK
88 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
89#ifdef BNX2X_STOP_ON_ERROR
90 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
91 BNX2X_ERR("BAD nbd!\n");
92 bnx2x_panic();
93 }
94#endif
95 new_cons = nbd + tx_buf->first_bd;
96
97 /* Get the next bd */
98 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
99
100 /* Skip a parse bd... */
101 --nbd;
102 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
103
104 /* ...and the TSO split header bd since they have no mapping */
105 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
106 --nbd;
107 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
108 }
109
110 /* now free frags */
111 while (nbd > 0) {
112
113 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
6383c0b3 114 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
115 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
116 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
117 if (--nbd)
118 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
119 }
120
121 /* release skb */
122 WARN_ON(!skb);
2df1a70a
TH
123 if (skb) {
124 (*pkts_compl)++;
125 (*bytes_compl) += skb->len;
126 }
40955532 127 dev_kfree_skb_any(skb);
9f6c9258
DK
128 tx_buf->first_bd = 0;
129 tx_buf->skb = NULL;
130
131 return new_cons;
132}
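/* Worked example (illustrative only, not part of the driver): for a TSO
 * packet with three data fragments the BD chain is start BD + parse BD +
 * split-header BD + 3 fragment BDs. Assuming the hardware nbd field counts
 * every BD in the chain (6 here), the "nbd - 1" above plus the two
 * decrements for the parse and split-header BDs leave nbd == 3, which is
 * exactly the number of dma_unmap_page() calls performed by the loop.
 */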
133
6383c0b3 134int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 135{
9f6c9258 136 struct netdev_queue *txq;
6383c0b3 137 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 138 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
139
140#ifdef BNX2X_STOP_ON_ERROR
141 if (unlikely(bp->panic))
142 return -1;
143#endif
144
6383c0b3
AE
145 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
146 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
147 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
148
149 while (sw_cons != hw_cons) {
150 u16 pkt_cons;
151
152 pkt_cons = TX_BD(sw_cons);
153
f2e0899f
DK
154 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
155 " pkt_cons %u\n",
6383c0b3 156 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 157
2df1a70a
TH
158 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
159 &pkts_compl, &bytes_compl);
160
9f6c9258
DK
161 sw_cons++;
162 }
163
2df1a70a
TH
164 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
165
6383c0b3
AE
166 txdata->tx_pkt_cons = sw_cons;
167 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
168
169 /* Need to make the tx_bd_cons update visible to start_xmit()
170 * before checking for netif_tx_queue_stopped(). Without the
171 * memory barrier, there is a small possibility that
172 * start_xmit() will miss it and cause the queue to be stopped
173 * forever.
619c5cb6
VZ
174 * On the other hand we need an rmb() here to ensure the proper
175 * ordering of bit testing in the following
176 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
177 */
178 smp_mb();
179
9f6c9258
DK
180 if (unlikely(netif_tx_queue_stopped(txq))) {
181 /* Taking tx_lock() is needed to prevent reenabling the queue
 182 * while it's empty. This could have happened if rx_action() gets
183 * suspended in bnx2x_tx_int() after the condition before
184 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
185 *
186 * stops the queue->sees fresh tx_bd_cons->releases the queue->
187 * sends some packets consuming the whole queue again->
188 * stops the queue
189 */
190
191 __netif_tx_lock(txq, smp_processor_id());
192
193 if ((netif_tx_queue_stopped(txq)) &&
194 (bp->state == BNX2X_STATE_OPEN) &&
6383c0b3 195 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
9f6c9258
DK
196 netif_tx_wake_queue(txq);
197
198 __netif_tx_unlock(txq);
199 }
200 return 0;
201}
202
203static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
204 u16 idx)
205{
206 u16 last_max = fp->last_max_sge;
207
208 if (SUB_S16(idx, last_max) > 0)
209 fp->last_max_sge = idx;
210}
211
212static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
213 struct eth_fast_path_rx_cqe *fp_cqe)
214{
215 struct bnx2x *bp = fp->bp;
216 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
217 le16_to_cpu(fp_cqe->len_on_bd)) >>
218 SGE_PAGE_SHIFT;
219 u16 last_max, last_elem, first_elem;
220 u16 delta = 0;
221 u16 i;
222
223 if (!sge_len)
224 return;
225
226 /* First mark all used pages */
227 for (i = 0; i < sge_len; i++)
619c5cb6 228 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
523224a3 229 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
230
231 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
523224a3 232 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
233
234 /* Here we assume that the last SGE index is the biggest */
235 prefetch((void *)(fp->sge_mask));
523224a3
DK
236 bnx2x_update_last_max_sge(fp,
237 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
238
239 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
240 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
241 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
242
243 /* If ring is not full */
244 if (last_elem + 1 != first_elem)
245 last_elem++;
246
247 /* Now update the prod */
248 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
249 if (likely(fp->sge_mask[i]))
250 break;
251
619c5cb6
VZ
252 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
253 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
254 }
255
256 if (delta > 0) {
257 fp->rx_sge_prod += delta;
258 /* clear page-end entries */
259 bnx2x_clear_sge_mask_next_elems(fp);
260 }
261
262 DP(NETIF_MSG_RX_STATUS,
263 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
264 fp->last_max_sge, fp->rx_sge_prod);
265}
266
e52fcb24
ED
267/* Set Toeplitz hash value in the skb using the value from the
268 * CQE (calculated by HW).
269 */
270static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
271 const struct eth_fast_path_rx_cqe *cqe)
272{
273 /* Set Toeplitz hash from CQE */
274 if ((bp->dev->features & NETIF_F_RXHASH) &&
275 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
276 return le32_to_cpu(cqe->rss_hash_result);
277 return 0;
278}
279
9f6c9258 280static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 281 u16 cons, u16 prod,
619c5cb6 282 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
283{
284 struct bnx2x *bp = fp->bp;
285 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
286 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
287 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
288 dma_addr_t mapping;
619c5cb6
VZ
289 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
290 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 291
619c5cb6
VZ
292 /* print error if current state != stop */
293 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
294 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
295
e52fcb24 296 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 297 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 298 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
299 fp->rx_buf_size, DMA_FROM_DEVICE);
300 /*
301 * ...if it fails - move the skb from the consumer to the producer
302 * and set the current aggregation state as ERROR to drop it
303 * when TPA_STOP arrives.
304 */
305
306 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
307 /* Move the BD from the consumer to the producer */
e52fcb24 308 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
309 tpa_info->tpa_state = BNX2X_TPA_ERROR;
310 return;
311 }
9f6c9258 312
e52fcb24
ED
313 /* move empty data from pool to prod */
314 prod_rx_buf->data = first_buf->data;
619c5cb6 315 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 316 /* point prod_bd to new data */
9f6c9258
DK
317 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
318 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
319
619c5cb6
VZ
320 /* move partial skb from cons to pool (don't unmap yet) */
321 *first_buf = *cons_rx_buf;
322
323 /* mark bin state as START */
324 tpa_info->parsing_flags =
325 le16_to_cpu(cqe->pars_flags.flags);
326 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
327 tpa_info->tpa_state = BNX2X_TPA_START;
328 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
329 tpa_info->placement_offset = cqe->placement_offset;
e52fcb24 330 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
619c5cb6 331
9f6c9258
DK
332#ifdef BNX2X_STOP_ON_ERROR
333 fp->tpa_queue_used |= (1 << queue);
334#ifdef _ASM_GENERIC_INT_L64_H
335 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
336#else
337 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
338#endif
339 fp->tpa_queue_used);
340#endif
341}
342
e4e3c02a
VZ
343/* Timestamp option length allowed for TPA aggregation:
344 *
345 * nop nop kind length echo val
346 */
347#define TPA_TSTAMP_OPT_LEN 12
348/**
e8920674 349 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 350 *
e8920674
DK
351 * @bp: driver handle
352 * @parsing_flags: parsing flags from the START CQE
353 * @len_on_bd: total length of the first packet for the
354 * aggregation.
355 *
356 * Approximate value of the MSS for this aggregation calculated using
357 * the first packet of it.
e4e3c02a
VZ
358 */
359static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
360 u16 len_on_bd)
361{
619c5cb6
VZ
362 /*
 363 * TPA aggregation won't have either IP options or TCP options
364 * other than timestamp or IPv6 extension headers.
e4e3c02a 365 */
619c5cb6
VZ
366 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
367
368 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
369 PRS_FLAG_OVERETH_IPV6)
370 hdrs_len += sizeof(struct ipv6hdr);
371 else /* IPv4 */
372 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
373
374
 375 /* Check if there was a TCP timestamp; if there was, it will
 376 * always be 12 bytes long: nop nop kind length echo val.
377 *
378 * Otherwise FW would close the aggregation.
379 */
380 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
381 hdrs_len += TPA_TSTAMP_OPT_LEN;
382
383 return len_on_bd - hdrs_len;
384}
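/* Worked example (illustrative only): for an IPv4 aggregation whose
 * packets carry TCP timestamps, the estimate above is ETH_HLEN (14) +
 * sizeof(struct tcphdr) (20) + sizeof(struct iphdr) (20) +
 * TPA_TSTAMP_OPT_LEN (12) = 66 bytes of headers, so a first packet with
 * len_on_bd == 1514 yields an approximate MSS of 1514 - 66 = 1448.
 */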
385
9f6c9258 386static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619c5cb6
VZ
387 u16 queue, struct sk_buff *skb,
388 struct eth_end_agg_rx_cqe *cqe,
389 u16 cqe_idx)
9f6c9258
DK
390{
391 struct sw_rx_page *rx_pg, old_rx_pg;
9f6c9258
DK
392 u32 i, frag_len, frag_size, pages;
393 int err;
394 int j;
619c5cb6
VZ
395 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
396 u16 len_on_bd = tpa_info->len_on_bd;
9f6c9258 397
619c5cb6 398 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
9f6c9258
DK
399 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
400
401 /* This is needed in order to enable forwarding support */
402 if (frag_size)
619c5cb6
VZ
403 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
404 tpa_info->parsing_flags, len_on_bd);
9f6c9258
DK
405
406#ifdef BNX2X_STOP_ON_ERROR
407 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
408 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
409 pages, cqe_idx);
619c5cb6 410 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
411 bnx2x_panic();
412 return -EINVAL;
413 }
414#endif
415
416 /* Run through the SGL and compose the fragmented skb */
417 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 418 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
419
420 /* FW gives the indices of the SGE as if the ring is an array
421 (meaning that "next" element will consume 2 indices) */
422 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
423 rx_pg = &fp->rx_page_ring[sge_idx];
424 old_rx_pg = *rx_pg;
425
426 /* If we fail to allocate a substitute page, we simply stop
427 where we are and drop the whole packet */
428 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
429 if (unlikely(err)) {
430 fp->eth_q_stats.rx_skb_alloc_failed++;
431 return err;
432 }
433
 434 /* Unmap the page as we are going to pass it to the stack */
435 dma_unmap_page(&bp->pdev->dev,
436 dma_unmap_addr(&old_rx_pg, mapping),
437 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
438
439 /* Add one frag and update the appropriate fields in the skb */
440 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
441
442 skb->data_len += frag_len;
e1ac50f6 443 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
444 skb->len += frag_len;
445
446 frag_size -= frag_len;
447 }
448
449 return 0;
450}
451
452static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619c5cb6 453 u16 queue, struct eth_end_agg_rx_cqe *cqe,
9f6c9258
DK
454 u16 cqe_idx)
455{
619c5cb6
VZ
456 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
457 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
e52fcb24 458 u32 pad = tpa_info->placement_offset;
619c5cb6 459 u16 len = tpa_info->len_on_bd;
e52fcb24
ED
460 struct sk_buff *skb = NULL;
461 u8 *data = rx_buf->data;
9f6c9258 462 /* alloc new skb */
e52fcb24 463 u8 *new_data;
619c5cb6
VZ
464 u8 old_tpa_state = tpa_info->tpa_state;
465
466 tpa_info->tpa_state = BNX2X_TPA_STOP;
467
 468 /* If there was an error during the handling of the TPA_START -
469 * drop this aggregation.
470 */
471 if (old_tpa_state == BNX2X_TPA_ERROR)
472 goto drop;
473
e52fcb24
ED
474 /* Try to allocate the new data */
475 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
9f6c9258
DK
476
477 /* Unmap skb in the pool anyway, as we are going to change
478 pool entry status to BNX2X_TPA_STOP even if new skb allocation
479 fails. */
480 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 481 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24
ED
482 if (likely(new_data))
483 skb = build_skb(data);
9f6c9258 484
e52fcb24 485 if (likely(skb)) {
9f6c9258 486#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 487 if (pad + len > fp->rx_buf_size) {
9f6c9258
DK
488 BNX2X_ERR("skb_put is about to fail... "
489 "pad %d len %d rx_buf_size %d\n",
a8c94b91 490 pad, len, fp->rx_buf_size);
9f6c9258
DK
491 bnx2x_panic();
492 return;
493 }
494#endif
495
e52fcb24 496 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 497 skb_put(skb, len);
e52fcb24 498 skb->rxhash = tpa_info->rxhash;
9f6c9258
DK
499
500 skb->protocol = eth_type_trans(skb, bp->dev);
501 skb->ip_summed = CHECKSUM_UNNECESSARY;
502
619c5cb6
VZ
503 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
504 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
505 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 506 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
507 } else {
508 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
509 " - dropping packet!\n");
40955532 510 dev_kfree_skb_any(skb);
9f6c9258
DK
511 }
512
513
e52fcb24
ED
514 /* put new data in bin */
515 rx_buf->data = new_data;
9f6c9258 516
619c5cb6 517 return;
9f6c9258 518 }
3f61cd87 519 kfree(new_data);
619c5cb6
VZ
520drop:
521 /* drop the packet and keep the buffer in the bin */
522 DP(NETIF_MSG_RX_STATUS,
523 "Failed to allocate or map a new skb - dropping packet!\n");
524 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
525}
526
9f6c9258
DK
527
528int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
529{
530 struct bnx2x *bp = fp->bp;
531 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
532 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
533 int rx_pkt = 0;
534
535#ifdef BNX2X_STOP_ON_ERROR
536 if (unlikely(bp->panic))
537 return 0;
538#endif
539
540 /* CQ "next element" is of the size of the regular element,
541 that's why it's ok here */
542 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
543 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
544 hw_comp_cons++;
545
546 bd_cons = fp->rx_bd_cons;
547 bd_prod = fp->rx_bd_prod;
548 bd_prod_fw = bd_prod;
549 sw_comp_cons = fp->rx_comp_cons;
550 sw_comp_prod = fp->rx_comp_prod;
551
552 /* Memory barrier necessary as speculative reads of the rx
553 * buffer can be ahead of the index in the status block
554 */
555 rmb();
556
557 DP(NETIF_MSG_RX_STATUS,
558 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
559 fp->index, hw_comp_cons, sw_comp_cons);
560
561 while (sw_comp_cons != hw_comp_cons) {
562 struct sw_rx_bd *rx_buf = NULL;
563 struct sk_buff *skb;
564 union eth_rx_cqe *cqe;
619c5cb6 565 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 566 u8 cqe_fp_flags;
619c5cb6 567 enum eth_rx_cqe_type cqe_fp_type;
9f6c9258 568 u16 len, pad;
e52fcb24 569 u8 *data;
9f6c9258 570
619c5cb6
VZ
571#ifdef BNX2X_STOP_ON_ERROR
572 if (unlikely(bp->panic))
573 return 0;
574#endif
575
9f6c9258
DK
576 comp_ring_cons = RCQ_BD(sw_comp_cons);
577 bd_prod = RX_BD(bd_prod);
578 bd_cons = RX_BD(bd_cons);
579
9f6c9258 580 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
581 cqe_fp = &cqe->fast_path_cqe;
582 cqe_fp_flags = cqe_fp->type_error_flags;
583 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258
DK
584
585 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
586 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
587 cqe_fp_flags, cqe_fp->status_flags,
588 le32_to_cpu(cqe_fp->rss_hash_result),
589 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
9f6c9258
DK
590
591 /* is this a slowpath msg? */
619c5cb6 592 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
593 bnx2x_sp_event(fp, cqe);
594 goto next_cqe;
e52fcb24
ED
595 }
596 rx_buf = &fp->rx_buf_ring[bd_cons];
597 data = rx_buf->data;
9f6c9258 598
e52fcb24 599 if (!CQE_TYPE_FAST(cqe_fp_type)) {
619c5cb6 600#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
601 /* sanity check */
602 if (fp->disable_tpa &&
603 (CQE_TYPE_START(cqe_fp_type) ||
604 CQE_TYPE_STOP(cqe_fp_type)))
605 BNX2X_ERR("START/STOP packet while "
606 "disable_tpa type %x\n",
607 CQE_TYPE(cqe_fp_type));
619c5cb6 608#endif
9f6c9258 609
e52fcb24
ED
610 if (CQE_TYPE_START(cqe_fp_type)) {
611 u16 queue = cqe_fp->queue_index;
612 DP(NETIF_MSG_RX_STATUS,
613 "calling tpa_start on queue %d\n",
614 queue);
9f6c9258 615
e52fcb24
ED
616 bnx2x_tpa_start(fp, queue,
617 bd_cons, bd_prod,
618 cqe_fp);
619 goto next_rx;
620 } else {
621 u16 queue =
622 cqe->end_agg_cqe.queue_index;
623 DP(NETIF_MSG_RX_STATUS,
624 "calling tpa_stop on queue %d\n",
625 queue);
626
627 bnx2x_tpa_stop(bp, fp, queue,
628 &cqe->end_agg_cqe,
629 comp_ring_cons);
9f6c9258 630#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
631 if (bp->panic)
632 return 0;
9f6c9258
DK
633#endif
634
e52fcb24
ED
635 bnx2x_update_sge_prod(fp, cqe_fp);
636 goto next_cqe;
9f6c9258 637 }
e52fcb24
ED
638 }
639 /* non TPA */
640 len = le16_to_cpu(cqe_fp->pkt_len);
641 pad = cqe_fp->placement_offset;
642 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 643 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
644 pad + RX_COPY_THRESH,
645 DMA_FROM_DEVICE);
646 pad += NET_SKB_PAD;
647 prefetch(data + pad); /* speedup eth_type_trans() */
648 /* is this an error packet? */
649 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
650 DP(NETIF_MSG_RX_ERR,
651 "ERROR flags %x rx packet %u\n",
652 cqe_fp_flags, sw_comp_cons);
653 fp->eth_q_stats.rx_err_discard_pkt++;
654 goto reuse_rx;
655 }
9f6c9258 656
e52fcb24
ED
657 /* Since we don't have a jumbo ring
658 * copy small packets if mtu > 1500
659 */
660 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
661 (len <= RX_COPY_THRESH)) {
662 skb = netdev_alloc_skb_ip_align(bp->dev, len);
663 if (skb == NULL) {
9f6c9258 664 DP(NETIF_MSG_RX_ERR,
e52fcb24
ED
665 "ERROR packet dropped because of alloc failure\n");
666 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
667 goto reuse_rx;
668 }
e52fcb24
ED
669 memcpy(skb->data, data + pad, len);
670 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
671 } else {
672 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 673 dma_unmap_single(&bp->pdev->dev,
e52fcb24 674 dma_unmap_addr(rx_buf, mapping),
a8c94b91 675 fp->rx_buf_size,
9f6c9258 676 DMA_FROM_DEVICE);
e52fcb24
ED
677 skb = build_skb(data);
678 if (unlikely(!skb)) {
679 kfree(data);
680 fp->eth_q_stats.rx_skb_alloc_failed++;
681 goto next_rx;
682 }
9f6c9258 683 skb_reserve(skb, pad);
9f6c9258
DK
684 } else {
685 DP(NETIF_MSG_RX_ERR,
686 "ERROR packet dropped because "
687 "of alloc failure\n");
688 fp->eth_q_stats.rx_skb_alloc_failed++;
689reuse_rx:
e52fcb24 690 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
691 goto next_rx;
692 }
036d2df9 693 }
9f6c9258 694
036d2df9
DK
695 skb_put(skb, len);
696 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 697
036d2df9
DK
 698 /* Set Toeplitz hash for a non-LRO skb */
699 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
9f6c9258 700
036d2df9 701 skb_checksum_none_assert(skb);
f85582f8 702
036d2df9 703 if (bp->dev->features & NETIF_F_RXCSUM) {
619c5cb6 704
036d2df9
DK
705 if (likely(BNX2X_RX_CSUM_OK(cqe)))
706 skb->ip_summed = CHECKSUM_UNNECESSARY;
707 else
708 fp->eth_q_stats.hw_csum_err++;
9f6c9258
DK
709 }
710
f233cafe 711 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 712
619c5cb6
VZ
713 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
714 PARSING_FLAGS_VLAN)
9bcc0893 715 __vlan_hwaccel_put_tag(skb,
619c5cb6 716 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 717 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
718
719
720next_rx:
e52fcb24 721 rx_buf->data = NULL;
9f6c9258
DK
722
723 bd_cons = NEXT_RX_IDX(bd_cons);
724 bd_prod = NEXT_RX_IDX(bd_prod);
725 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
726 rx_pkt++;
727next_cqe:
728 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
729 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
730
731 if (rx_pkt == budget)
732 break;
733 } /* while */
734
735 fp->rx_bd_cons = bd_cons;
736 fp->rx_bd_prod = bd_prod_fw;
737 fp->rx_comp_cons = sw_comp_cons;
738 fp->rx_comp_prod = sw_comp_prod;
739
740 /* Update producers */
741 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
742 fp->rx_sge_prod);
743
744 fp->rx_pkt += rx_pkt;
745 fp->rx_calls++;
746
747 return rx_pkt;
748}
749
750static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
751{
752 struct bnx2x_fastpath *fp = fp_cookie;
753 struct bnx2x *bp = fp->bp;
6383c0b3 754 u8 cos;
9f6c9258 755
523224a3
DK
756 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
757 "[fp %d fw_sd %d igusb %d]\n",
758 fp->index, fp->fw_sb_id, fp->igu_sb_id);
759 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
760
761#ifdef BNX2X_STOP_ON_ERROR
762 if (unlikely(bp->panic))
763 return IRQ_HANDLED;
764#endif
765
766 /* Handle Rx and Tx according to MSI-X vector */
767 prefetch(fp->rx_cons_sb);
6383c0b3
AE
768
769 for_each_cos_in_tx_queue(fp, cos)
770 prefetch(fp->txdata[cos].tx_cons_sb);
771
523224a3 772 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
773 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
774
775 return IRQ_HANDLED;
776}
777
9f6c9258
DK
778/* HW Lock for shared dual port PHYs */
779void bnx2x_acquire_phy_lock(struct bnx2x *bp)
780{
781 mutex_lock(&bp->port.phy_mutex);
782
783 if (bp->port.need_hw_lock)
784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
785}
786
787void bnx2x_release_phy_lock(struct bnx2x *bp)
788{
789 if (bp->port.need_hw_lock)
790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
791
792 mutex_unlock(&bp->port.phy_mutex);
793}
794
0793f83f
DK
795/* calculates MF speed according to the current line speed and MF configuration */
796u16 bnx2x_get_mf_speed(struct bnx2x *bp)
797{
798 u16 line_speed = bp->link_vars.line_speed;
799 if (IS_MF(bp)) {
faa6fcbb
DK
800 u16 maxCfg = bnx2x_extract_max_cfg(bp,
801 bp->mf_config[BP_VN(bp)]);
802
803 /* Calculate the current MAX line speed limit for the MF
804 * devices
0793f83f 805 */
faa6fcbb
DK
806 if (IS_MF_SI(bp))
807 line_speed = (line_speed * maxCfg) / 100;
808 else { /* SD mode */
0793f83f
DK
809 u16 vn_max_rate = maxCfg * 100;
810
811 if (vn_max_rate < line_speed)
812 line_speed = vn_max_rate;
faa6fcbb 813 }
0793f83f
DK
814 }
815
816 return line_speed;
817}
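/* Worked example (illustrative only): with a 10000 Mbps link and
 * maxCfg == 50, the SI branch reports 10000 * 50 / 100 = 5000 Mbps, while
 * the SD branch computes vn_max_rate = 50 * 100 = 5000 Mbps and caps the
 * line speed at the same value; the two branches interpret maxCfg
 * differently (a percentage vs. units of 100 Mbps) but agree in this case.
 */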
818
2ae17f66
VZ
819/**
820 * bnx2x_fill_report_data - fill link report data to report
821 *
822 * @bp: driver handle
823 * @data: link state to update
824 *
 825 * It uses non-atomic bit operations because it is called under the mutex.
826 */
827static inline void bnx2x_fill_report_data(struct bnx2x *bp,
828 struct bnx2x_link_report_data *data)
829{
830 u16 line_speed = bnx2x_get_mf_speed(bp);
831
832 memset(data, 0, sizeof(*data));
833
 834 /* Fill the report data: effective line speed */
835 data->line_speed = line_speed;
836
837 /* Link is down */
838 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
839 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
840 &data->link_report_flags);
841
842 /* Full DUPLEX */
843 if (bp->link_vars.duplex == DUPLEX_FULL)
844 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
845
846 /* Rx Flow Control is ON */
847 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
848 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
849
850 /* Tx Flow Control is ON */
851 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
852 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
853}
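/* Example of the resulting report (illustrative only): for a 10 Gbps
 * full-duplex link with both pause directions enabled, line_speed == 10000
 * and the FD, RX_FC_ON and TX_FC_ON bits are set (LINK_DOWN is clear), so
 * __bnx2x_link_report() below prints
 * "NIC Link is Up, 10000 Mbps full duplex, Flow control: ON - receive & transmit".
 */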
854
855/**
856 * bnx2x_link_report - report link status to OS.
857 *
858 * @bp: driver handle
859 *
860 * Calls the __bnx2x_link_report() under the same locking scheme
861 * as a link/PHY state managing code to ensure a consistent link
862 * reporting.
863 */
864
9f6c9258
DK
865void bnx2x_link_report(struct bnx2x *bp)
866{
2ae17f66
VZ
867 bnx2x_acquire_phy_lock(bp);
868 __bnx2x_link_report(bp);
869 bnx2x_release_phy_lock(bp);
870}
9f6c9258 871
2ae17f66
VZ
872/**
873 * __bnx2x_link_report - report link status to OS.
874 *
875 * @bp: driver handle
876 *
 877 * Non-atomic implementation.
878 * Should be called under the phy_lock.
879 */
880void __bnx2x_link_report(struct bnx2x *bp)
881{
882 struct bnx2x_link_report_data cur_data;
9f6c9258 883
2ae17f66
VZ
884 /* reread mf_cfg */
885 if (!CHIP_IS_E1(bp))
886 bnx2x_read_mf_cfg(bp);
887
888 /* Read the current link report info */
889 bnx2x_fill_report_data(bp, &cur_data);
890
891 /* Don't report link down or exactly the same link status twice */
892 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
893 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
894 &bp->last_reported_link.link_report_flags) &&
895 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
896 &cur_data.link_report_flags)))
897 return;
898
899 bp->link_cnt++;
9f6c9258 900
2ae17f66
VZ
 901 /* We are going to report new link parameters now -
902 * remember the current data for the next time.
903 */
904 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 905
2ae17f66
VZ
906 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
907 &cur_data.link_report_flags)) {
908 netif_carrier_off(bp->dev);
909 netdev_err(bp->dev, "NIC Link is Down\n");
910 return;
911 } else {
94f05b0f
JP
912 const char *duplex;
913 const char *flow;
914
2ae17f66 915 netif_carrier_on(bp->dev);
9f6c9258 916
2ae17f66
VZ
917 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
918 &cur_data.link_report_flags))
94f05b0f 919 duplex = "full";
9f6c9258 920 else
94f05b0f 921 duplex = "half";
9f6c9258 922
2ae17f66
VZ
923 /* Handle the FC at the end so that only these flags would be
924 * possibly set. This way we may easily check if there is no FC
925 * enabled.
926 */
927 if (cur_data.link_report_flags) {
928 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
929 &cur_data.link_report_flags)) {
2ae17f66
VZ
930 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
931 &cur_data.link_report_flags))
94f05b0f
JP
932 flow = "ON - receive & transmit";
933 else
934 flow = "ON - receive";
9f6c9258 935 } else {
94f05b0f 936 flow = "ON - transmit";
9f6c9258 937 }
94f05b0f
JP
938 } else {
939 flow = "none";
9f6c9258 940 }
94f05b0f
JP
941 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
942 cur_data.line_speed, duplex, flow);
9f6c9258
DK
943 }
944}
945
946void bnx2x_init_rx_rings(struct bnx2x *bp)
947{
948 int func = BP_FUNC(bp);
523224a3 949 u16 ring_prod;
9f6c9258 950 int i, j;
25141580 951
b3b83c3f 952 /* Allocate TPA resources */
ec6ba945 953 for_each_rx_queue(bp, j) {
523224a3 954 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 955
a8c94b91
VZ
956 DP(NETIF_MSG_IFUP,
957 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
958
523224a3 959 if (!fp->disable_tpa) {
619c5cb6 960 /* Fill the per-aggregation pool */
dfacf138 961 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
962 struct bnx2x_agg_info *tpa_info =
963 &fp->tpa_info[i];
964 struct sw_rx_bd *first_buf =
965 &tpa_info->first_buf;
966
e52fcb24
ED
967 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
968 GFP_ATOMIC);
969 if (!first_buf->data) {
9f6c9258
DK
970 BNX2X_ERR("Failed to allocate TPA "
971 "skb pool for queue[%d] - "
972 "disabling TPA on this "
973 "queue!\n", j);
974 bnx2x_free_tpa_pool(bp, fp, i);
975 fp->disable_tpa = 1;
976 break;
977 }
619c5cb6
VZ
978 dma_unmap_addr_set(first_buf, mapping, 0);
979 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 980 }
523224a3
DK
981
982 /* "next page" elements initialization */
983 bnx2x_set_next_page_sgl(fp);
984
985 /* set SGEs bit mask */
986 bnx2x_init_sge_ring_bit_mask(fp);
987
988 /* Allocate SGEs and initialize the ring elements */
989 for (i = 0, ring_prod = 0;
990 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
991
992 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
993 BNX2X_ERR("was only able to allocate "
994 "%d rx sges\n", i);
619c5cb6
VZ
995 BNX2X_ERR("disabling TPA for "
996 "queue[%d]\n", j);
523224a3 997 /* Cleanup already allocated elements */
619c5cb6
VZ
998 bnx2x_free_rx_sge_range(bp, fp,
999 ring_prod);
1000 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1001 MAX_AGG_QS(bp));
523224a3
DK
1002 fp->disable_tpa = 1;
1003 ring_prod = 0;
1004 break;
1005 }
1006 ring_prod = NEXT_SGE_IDX(ring_prod);
1007 }
1008
1009 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1010 }
1011 }
1012
ec6ba945 1013 for_each_rx_queue(bp, j) {
9f6c9258
DK
1014 struct bnx2x_fastpath *fp = &bp->fp[j];
1015
1016 fp->rx_bd_cons = 0;
9f6c9258 1017
b3b83c3f
DK
1018 /* Activate BD ring */
1019 /* Warning!
1020 * this will generate an interrupt (to the TSTORM)
1021 * must only be done after chip is initialized
1022 */
1023 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1024 fp->rx_sge_prod);
9f6c9258 1025
9f6c9258
DK
1026 if (j != 0)
1027 continue;
1028
619c5cb6 1029 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1030 REG_WR(bp, BAR_USTRORM_INTMEM +
1031 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1032 U64_LO(fp->rx_comp_mapping));
1033 REG_WR(bp, BAR_USTRORM_INTMEM +
1034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1035 U64_HI(fp->rx_comp_mapping));
1036 }
9f6c9258
DK
1037 }
1038}
f85582f8 1039
9f6c9258
DK
1040static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1041{
1042 int i;
6383c0b3 1043 u8 cos;
9f6c9258 1044
ec6ba945 1045 for_each_tx_queue(bp, i) {
9f6c9258 1046 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3
AE
1047 for_each_cos_in_tx_queue(fp, cos) {
1048 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
2df1a70a 1049 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1050
6383c0b3
AE
1051 u16 sw_prod = txdata->tx_pkt_prod;
1052 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1053
6383c0b3 1054 while (sw_cons != sw_prod) {
2df1a70a
TH
1055 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1056 &pkts_compl, &bytes_compl);
6383c0b3
AE
1057 sw_cons++;
1058 }
2df1a70a
TH
1059 netdev_tx_reset_queue(
1060 netdev_get_tx_queue(bp->dev, txdata->txq_index));
9f6c9258
DK
1061 }
1062 }
1063}
1064
b3b83c3f
DK
1065static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1066{
1067 struct bnx2x *bp = fp->bp;
1068 int i;
1069
1070 /* ring wasn't allocated */
1071 if (fp->rx_buf_ring == NULL)
1072 return;
1073
1074 for (i = 0; i < NUM_RX_BD; i++) {
1075 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1076 u8 *data = rx_buf->data;
b3b83c3f 1077
e52fcb24 1078 if (data == NULL)
b3b83c3f 1079 continue;
b3b83c3f
DK
1080 dma_unmap_single(&bp->pdev->dev,
1081 dma_unmap_addr(rx_buf, mapping),
1082 fp->rx_buf_size, DMA_FROM_DEVICE);
1083
e52fcb24
ED
1084 rx_buf->data = NULL;
1085 kfree(data);
b3b83c3f
DK
1086 }
1087}
1088
9f6c9258
DK
1089static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1090{
b3b83c3f 1091 int j;
9f6c9258 1092
ec6ba945 1093 for_each_rx_queue(bp, j) {
9f6c9258
DK
1094 struct bnx2x_fastpath *fp = &bp->fp[j];
1095
b3b83c3f 1096 bnx2x_free_rx_bds(fp);
9f6c9258 1097
9f6c9258 1098 if (!fp->disable_tpa)
dfacf138 1099 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1100 }
1101}
1102
1103void bnx2x_free_skbs(struct bnx2x *bp)
1104{
1105 bnx2x_free_tx_skbs(bp);
1106 bnx2x_free_rx_skbs(bp);
1107}
1108
e3835b99
DK
1109void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1110{
1111 /* load old values */
1112 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1113
1114 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1115 /* leave all but MAX value */
1116 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1117
1118 /* set new MAX value */
1119 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1120 & FUNC_MF_CFG_MAX_BW_MASK;
1121
1122 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1123 }
1124}
1125
ca92429f
DK
1126/**
1127 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1128 *
1129 * @bp: driver handle
1130 * @nvecs: number of vectors to be released
1131 */
1132static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1133{
ca92429f 1134 int i, offset = 0;
9f6c9258 1135
ca92429f
DK
1136 if (nvecs == offset)
1137 return;
1138 free_irq(bp->msix_table[offset].vector, bp->dev);
9f6c9258 1139 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
ca92429f
DK
1140 bp->msix_table[offset].vector);
1141 offset++;
9f6c9258 1142#ifdef BCM_CNIC
ca92429f
DK
1143 if (nvecs == offset)
1144 return;
9f6c9258
DK
1145 offset++;
1146#endif
ca92429f 1147
ec6ba945 1148 for_each_eth_queue(bp, i) {
ca92429f
DK
1149 if (nvecs == offset)
1150 return;
1151 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1152 "irq\n", i, bp->msix_table[offset].vector);
9f6c9258 1153
ca92429f 1154 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1155 }
1156}
1157
d6214d7a 1158void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1159{
d6214d7a 1160 if (bp->flags & USING_MSIX_FLAG)
ca92429f 1161 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
6383c0b3 1162 CNIC_PRESENT + 1);
d6214d7a
DK
1163 else if (bp->flags & USING_MSI_FLAG)
1164 free_irq(bp->pdev->irq, bp->dev);
1165 else
9f6c9258
DK
1166 free_irq(bp->pdev->irq, bp->dev);
1167}
1168
d6214d7a 1169int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1170{
d6214d7a 1171 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1172
d6214d7a
DK
1173 bp->msix_table[msix_vec].entry = msix_vec;
1174 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1175 bp->msix_table[0].entry);
1176 msix_vec++;
9f6c9258
DK
1177
1178#ifdef BCM_CNIC
d6214d7a
DK
1179 bp->msix_table[msix_vec].entry = msix_vec;
1180 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1181 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1182 msix_vec++;
9f6c9258 1183#endif
6383c0b3 1184 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1185 for_each_eth_queue(bp, i) {
d6214d7a 1186 bp->msix_table[msix_vec].entry = msix_vec;
9f6c9258 1187 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
d6214d7a
DK
1188 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1189 msix_vec++;
9f6c9258
DK
1190 }
1191
6383c0b3 1192 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
d6214d7a
DK
1193
1194 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1195
1196 /*
1197 * reconfigure number of tx/rx queues according to available
1198 * MSI-X vectors
1199 */
1200 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
d6214d7a
DK
 1201 /* how many fewer vectors will we have? */
1202 int diff = req_cnt - rc;
9f6c9258
DK
1203
1204 DP(NETIF_MSG_IFUP,
1205 "Trying to use less MSI-X vectors: %d\n", rc);
1206
1207 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1208
1209 if (rc) {
1210 DP(NETIF_MSG_IFUP,
1211 "MSI-X is not attainable rc %d\n", rc);
1212 return rc;
1213 }
d6214d7a
DK
1214 /*
1215 * decrease number of queues by number of unallocated entries
1216 */
1217 bp->num_queues -= diff;
9f6c9258
DK
1218
1219 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1220 bp->num_queues);
1221 } else if (rc) {
d6214d7a
DK
1222 /* fall to INTx if not enough memory */
1223 if (rc == -ENOMEM)
1224 bp->flags |= DISABLE_MSI_FLAG;
9f6c9258
DK
1225 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1226 return rc;
1227 }
1228
1229 bp->flags |= USING_MSIX_FLAG;
1230
1231 return 0;
1232}
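/* Illustration of the retry path above (not part of the driver): with
 * 8 ETH queues, req_cnt = 8 + CNIC_PRESENT + 1. If pci_enable_msix()
 * returns rc == 6 (and 6 >= BNX2X_MIN_MSIX_VEC_CNT), the driver retries
 * with 6 vectors and shrinks bp->num_queues by diff = req_cnt - 6, so the
 * ETH queue count is reduced to fit the vectors the platform granted.
 */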
1233
1234static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1235{
ca92429f 1236 int i, rc, offset = 0;
9f6c9258 1237
ca92429f
DK
1238 rc = request_irq(bp->msix_table[offset++].vector,
1239 bnx2x_msix_sp_int, 0,
9f6c9258
DK
1240 bp->dev->name, bp->dev);
1241 if (rc) {
1242 BNX2X_ERR("request sp irq failed\n");
1243 return -EBUSY;
1244 }
1245
1246#ifdef BCM_CNIC
1247 offset++;
1248#endif
ec6ba945 1249 for_each_eth_queue(bp, i) {
9f6c9258
DK
1250 struct bnx2x_fastpath *fp = &bp->fp[i];
1251 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1252 bp->dev->name, i);
1253
d6214d7a 1254 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1255 bnx2x_msix_fp_int, 0, fp->name, fp);
1256 if (rc) {
ca92429f
DK
1257 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1258 bp->msix_table[offset].vector, rc);
1259 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1260 return -EBUSY;
1261 }
1262
d6214d7a 1263 offset++;
9f6c9258
DK
1264 }
1265
ec6ba945 1266 i = BNX2X_NUM_ETH_QUEUES(bp);
6383c0b3 1267 offset = 1 + CNIC_PRESENT;
9f6c9258
DK
1268 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1269 " ... fp[%d] %d\n",
1270 bp->msix_table[0].vector,
1271 0, bp->msix_table[offset].vector,
1272 i - 1, bp->msix_table[offset + i - 1].vector);
1273
1274 return 0;
1275}
1276
d6214d7a 1277int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1278{
1279 int rc;
1280
1281 rc = pci_enable_msi(bp->pdev);
1282 if (rc) {
1283 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1284 return -1;
1285 }
1286 bp->flags |= USING_MSI_FLAG;
1287
1288 return 0;
1289}
1290
1291static int bnx2x_req_irq(struct bnx2x *bp)
1292{
1293 unsigned long flags;
1294 int rc;
1295
1296 if (bp->flags & USING_MSI_FLAG)
1297 flags = 0;
1298 else
1299 flags = IRQF_SHARED;
1300
1301 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1302 bp->dev->name, bp->dev);
9f6c9258
DK
1303 return rc;
1304}
1305
619c5cb6
VZ
1306static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1307{
1308 int rc = 0;
1309 if (bp->flags & USING_MSIX_FLAG) {
1310 rc = bnx2x_req_msix_irqs(bp);
1311 if (rc)
1312 return rc;
1313 } else {
1314 bnx2x_ack_int(bp);
1315 rc = bnx2x_req_irq(bp);
1316 if (rc) {
1317 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1318 return rc;
1319 }
1320 if (bp->flags & USING_MSI_FLAG) {
1321 bp->dev->irq = bp->pdev->irq;
1322 netdev_info(bp->dev, "using MSI IRQ %d\n",
1323 bp->pdev->irq);
1324 }
1325 }
1326
1327 return 0;
1328}
1329
1330static inline void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1331{
1332 int i;
1333
619c5cb6 1334 for_each_rx_queue(bp, i)
9f6c9258
DK
1335 napi_enable(&bnx2x_fp(bp, i, napi));
1336}
1337
619c5cb6 1338static inline void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1339{
1340 int i;
1341
619c5cb6 1342 for_each_rx_queue(bp, i)
9f6c9258
DK
1343 napi_disable(&bnx2x_fp(bp, i, napi));
1344}
1345
1346void bnx2x_netif_start(struct bnx2x *bp)
1347{
4b7ed897
DK
1348 if (netif_running(bp->dev)) {
1349 bnx2x_napi_enable(bp);
1350 bnx2x_int_enable(bp);
1351 if (bp->state == BNX2X_STATE_OPEN)
1352 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1353 }
1354}
1355
1356void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1357{
1358 bnx2x_int_disable_sync(bp, disable_hw);
1359 bnx2x_napi_disable(bp);
9f6c9258 1360}
9f6c9258 1361
8307fa3e
VZ
1362u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1363{
8307fa3e 1364 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1365
faa28314 1366#ifdef BCM_CNIC
cdb9d6ae 1367 if (!NO_FCOE(bp)) {
8307fa3e
VZ
1368 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1369 u16 ether_type = ntohs(hdr->h_proto);
1370
1371 /* Skip VLAN tag if present */
1372 if (ether_type == ETH_P_8021Q) {
1373 struct vlan_ethhdr *vhdr =
1374 (struct vlan_ethhdr *)skb->data;
1375
1376 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1377 }
1378
1379 /* If ethertype is FCoE or FIP - use FCoE ring */
1380 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1381 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e
VZ
1382 }
1383#endif
cdb9d6ae 1384 /* select a non-FCoE queue */
6383c0b3 1385 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1386}
1387
d6214d7a
DK
1388void bnx2x_set_num_queues(struct bnx2x *bp)
1389{
1390 switch (bp->multi_mode) {
1391 case ETH_RSS_MODE_DISABLED:
9f6c9258 1392 bp->num_queues = 1;
d6214d7a
DK
1393 break;
1394 case ETH_RSS_MODE_REGULAR:
1395 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1396 break;
f85582f8 1397
9f6c9258 1398 default:
d6214d7a 1399 bp->num_queues = 1;
9f6c9258
DK
1400 break;
1401 }
ec6ba945 1402
614c76df
DK
1403#ifdef BCM_CNIC
 1404 /* override in ISCSI SD mode */
1405 if (IS_MF_ISCSI_SD(bp))
1406 bp->num_queues = 1;
1407#endif
ec6ba945 1408 /* Add special queues */
6383c0b3 1409 bp->num_queues += NON_ETH_CONTEXT_USE;
ec6ba945
VZ
1410}
1411
cdb9d6ae
VZ
1412/**
1413 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1414 *
1415 * @bp: Driver handle
1416 *
 1417 * We currently support at most 16 Tx queues for each CoS, thus we will
1418 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1419 * bp->max_cos.
1420 *
1421 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1422 * index after all ETH L2 indices.
1423 *
1424 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1425 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1426 * 16..31,...) with indices that are not coupled with any real Tx queue.
1427 *
1428 * The proper configuration of skb->queue_mapping is handled by
1429 * bnx2x_select_queue() and __skb_tx_hash().
1430 *
1431 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1432 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1433 */
ec6ba945
VZ
1434static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1435{
6383c0b3 1436 int rc, tx, rx;
ec6ba945 1437
6383c0b3
AE
1438 tx = MAX_TXQS_PER_COS * bp->max_cos;
1439 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1440
6383c0b3
AE
1441/* account for fcoe queue */
1442#ifdef BCM_CNIC
1443 if (!NO_FCOE(bp)) {
1444 rx += FCOE_PRESENT;
1445 tx += FCOE_PRESENT;
1446 }
ec6ba945 1447#endif
6383c0b3
AE
1448
1449 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1450 if (rc) {
1451 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1452 return rc;
1453 }
1454 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1455 if (rc) {
1456 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1457 return rc;
1458 }
1459
1460 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1461 tx, rx);
1462
ec6ba945
VZ
1463 return rc;
1464}
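/* Illustrative index layout (an assumption based on the comment above, not
 * taken from driver headers): with bp->max_cos == 2 and 8 ETH L2 rings,
 * CoS 0 would own Tx queue indices 0..7 and CoS 1 indices 16..23, each CoS
 * spanning a stride of MAX_TXQS_PER_COS (16); indices 8..15 and 24..31 are
 * the unused holes described above, and an FCoE L2 queue, if present, uses
 * the index immediately after all ETH L2 indices.
 */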
1465
a8c94b91
VZ
1466static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1467{
1468 int i;
1469
1470 for_each_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1472 u32 mtu;
a8c94b91
VZ
1473
1474 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1475 if (IS_FCOE_IDX(i))
1476 /*
1477 * Although there are no IP frames expected to arrive to
1478 * this ring we still want to add an
1479 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1480 * overrun attack.
1481 */
e52fcb24 1482 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1483 else
e52fcb24
ED
1484 mtu = bp->dev->mtu;
1485 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1486 IP_HEADER_ALIGNMENT_PADDING +
1487 ETH_OVREHEAD +
1488 mtu +
1489 BNX2X_FW_RX_ALIGN_END;
 1490 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
a8c94b91
VZ
1491 }
1492}
1493
619c5cb6
VZ
1494static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1495{
1496 int i;
1497 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1498 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1499
1500 /*
 1501 * Prepare the initial contents of the indirection table if RSS is
1502 * enabled
1503 */
1504 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1505 for (i = 0; i < sizeof(ind_table); i++)
1506 ind_table[i] =
278bc429
BH
1507 bp->fp->cl_id +
1508 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1509 }
1510
1511 /*
1512 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1513 * per-port, so if explicit configuration is needed, do it only
1514 * for a PMF.
1515 *
1516 * For 57712 and newer on the other hand it's a per-function
1517 * configuration.
1518 */
1519 return bnx2x_config_rss_pf(bp, ind_table,
1520 bp->port.pmf || !CHIP_IS_E1x(bp));
1521}
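/* Worked example (illustrative; assumes ethtool_rxfh_indir_default(i, n)
 * behaves as the generic "i % n" default): with 4 ETH queues and
 * bp->fp->cl_id == 17, the table built above cycles through
 * 17, 18, 19, 20, 17, 18, ... across its T_ETH_INDIRECTION_TABLE_SIZE
 * entries.
 */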
1522
1523int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1524{
1525 struct bnx2x_config_rss_params params = {0};
1526 int i;
1527
1528 /* Although RSS is meaningless when there is a single HW queue we
1529 * still need it enabled in order to have HW Rx hash generated.
1530 *
1531 * if (!is_eth_multi(bp))
1532 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1533 */
1534
1535 params.rss_obj = &bp->rss_conf_obj;
1536
1537 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1538
1539 /* RSS mode */
1540 switch (bp->multi_mode) {
1541 case ETH_RSS_MODE_DISABLED:
1542 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1543 break;
1544 case ETH_RSS_MODE_REGULAR:
1545 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1546 break;
1547 case ETH_RSS_MODE_VLAN_PRI:
1548 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1549 break;
1550 case ETH_RSS_MODE_E1HOV_PRI:
1551 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1552 break;
1553 case ETH_RSS_MODE_IP_DSCP:
1554 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1555 break;
1556 default:
1557 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1558 return -EINVAL;
1559 }
1560
1561 /* If RSS is enabled */
1562 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1563 /* RSS configuration */
1564 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1565 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1566 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1567 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1568
1569 /* Hash bits */
1570 params.rss_result_mask = MULTI_MASK;
1571
1572 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1573
1574 if (config_hash) {
1575 /* RSS keys */
1576 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1577 params.rss_key[i] = random32();
1578
1579 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1580 }
1581 }
1582
1583 return bnx2x_config_rss(bp, &params);
1584}
1585
1586static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1587{
1588 struct bnx2x_func_state_params func_params = {0};
1589
1590 /* Prepare parameters for function state transitions */
1591 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1592
1593 func_params.f_obj = &bp->func_obj;
1594 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1595
1596 func_params.params.hw_init.load_phase = load_code;
1597
1598 return bnx2x_func_state_change(bp, &func_params);
1599}
1600
1601/*
 1602 * Cleans the objects that have internal lists without sending
 1603 * ramrods. Should be run when interrupts are disabled.
1604 */
1605static void bnx2x_squeeze_objects(struct bnx2x *bp)
1606{
1607 int rc;
1608 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1609 struct bnx2x_mcast_ramrod_params rparam = {0};
1610 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1611
1612 /***************** Cleanup MACs' object first *************************/
1613
 1614 /* Wait for completion of the requested commands */
1615 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1616 /* Perform a dry cleanup */
1617 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1618
1619 /* Clean ETH primary MAC */
1620 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1621 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1622 &ramrod_flags);
1623 if (rc != 0)
1624 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1625
1626 /* Cleanup UC list */
1627 vlan_mac_flags = 0;
1628 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1629 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1630 &ramrod_flags);
1631 if (rc != 0)
1632 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1633
1634 /***************** Now clean mcast object *****************************/
1635 rparam.mcast_obj = &bp->mcast_obj;
1636 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1637
1638 /* Add a DEL command... */
1639 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1640 if (rc < 0)
1641 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1642 "object: %d\n", rc);
1643
1644 /* ...and wait until all pending commands are cleared */
1645 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1646 while (rc != 0) {
1647 if (rc < 0) {
1648 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1649 rc);
1650 return;
1651 }
1652
1653 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1654 }
1655}
1656
1657#ifndef BNX2X_STOP_ON_ERROR
1658#define LOAD_ERROR_EXIT(bp, label) \
1659 do { \
1660 (bp)->state = BNX2X_STATE_ERROR; \
1661 goto label; \
1662 } while (0)
1663#else
1664#define LOAD_ERROR_EXIT(bp, label) \
1665 do { \
1666 (bp)->state = BNX2X_STATE_ERROR; \
1667 (bp)->panic = 1; \
1668 return -EBUSY; \
1669 } while (0)
1670#endif
1671
9f6c9258
DK
1672/* must be called with rtnl_lock */
1673int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1674{
619c5cb6 1675 int port = BP_PORT(bp);
9f6c9258
DK
1676 u32 load_code;
1677 int i, rc;
1678
1679#ifdef BNX2X_STOP_ON_ERROR
1680 if (unlikely(bp->panic))
1681 return -EPERM;
1682#endif
1683
1684 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1685
2ae17f66
VZ
1686 /* Set the initial link reported state to link down */
1687 bnx2x_acquire_phy_lock(bp);
1688 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1689 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1690 &bp->last_reported_link.link_report_flags);
1691 bnx2x_release_phy_lock(bp);
1692
523224a3
DK
1693 /* must be called before memory allocation and HW init */
1694 bnx2x_ilt_set_info(bp);
1695
6383c0b3
AE
1696 /*
1697 * Zero fastpath structures preserving invariants like napi, which are
1698 * allocated only once, fp index, max_cos, bp pointer.
1699 * Also set fp->disable_tpa.
b3b83c3f
DK
1700 */
1701 for_each_queue(bp, i)
1702 bnx2x_bz_fp(bp, i);
1703
6383c0b3 1704
a8c94b91
VZ
1705 /* Set the receive queues buffer size */
1706 bnx2x_set_rx_buf_size(bp);
1707
d6214d7a 1708 if (bnx2x_alloc_mem(bp))
9f6c9258 1709 return -ENOMEM;
d6214d7a 1710
b3b83c3f
DK
1711 /* As long as bnx2x_alloc_mem() may possibly update
1712 * bp->num_queues, bnx2x_set_real_num_queues() should always
1713 * come after it.
1714 */
ec6ba945 1715 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1716 if (rc) {
ec6ba945 1717 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 1718 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
1719 }
1720
6383c0b3
AE
1721 /* configure multi cos mappings in kernel.
 1722 * this configuration may be overridden by a multi-class queue discipline
1723 * or by a dcbx negotiation result.
1724 */
1725 bnx2x_setup_tc(bp->dev, bp->max_cos);
1726
9f6c9258
DK
1727 bnx2x_napi_enable(bp);
1728
889b9af3
AE
1729 /* set pf load just before approaching the MCP */
1730 bnx2x_set_pf_load(bp);
1731
9f6c9258 1732 /* Send LOAD_REQUEST command to MCP
619c5cb6
VZ
1733 * Returns the type of LOAD command:
1734 * if it is the first port to be initialized
1735 * common blocks should be initialized, otherwise - not
1736 */
9f6c9258 1737 if (!BP_NOMCP(bp)) {
95c6c616
AE
1738 /* init fw_seq */
1739 bp->fw_seq =
1740 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1741 DRV_MSG_SEQ_NUMBER_MASK);
1742 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1743
1744 /* Get current FW pulse sequence */
1745 bp->fw_drv_pulse_wr_seq =
1746 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1747 DRV_PULSE_SEQ_MASK);
1748 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1749
a22f0788 1750 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
1751 if (!load_code) {
1752 BNX2X_ERR("MCP response failure, aborting\n");
1753 rc = -EBUSY;
619c5cb6 1754 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1755 }
1756 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1757 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 1758 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258 1759 }
d1e2d966
AE
1760 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1761 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
1762 /* build FW version dword */
1763 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1764 (BCM_5710_FW_MINOR_VERSION << 8) +
1765 (BCM_5710_FW_REVISION_VERSION << 16) +
1766 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1767
1768 /* read loaded FW from chip */
1769 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1770
1771 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
1772 loaded_fw, my_fw);
1773
1774 /* abort nic load if version mismatch */
1775 if (my_fw != loaded_fw) {
1776 BNX2X_ERR("bnx2x with FW %x already loaded, "
1777 "which mismatches my %x FW. aborting",
1778 loaded_fw, my_fw);
1779 rc = -EBUSY;
1780 LOAD_ERROR_EXIT(bp, load_error2);
1781 }
1782 }
9f6c9258
DK
1783
1784 } else {
f2e0899f 1785 int path = BP_PATH(bp);
9f6c9258 1786
f2e0899f
DK
1787 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1788 path, load_count[path][0], load_count[path][1],
1789 load_count[path][2]);
1790 load_count[path][0]++;
1791 load_count[path][1 + port]++;
1792 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1793 path, load_count[path][0], load_count[path][1],
1794 load_count[path][2]);
1795 if (load_count[path][0] == 1)
9f6c9258 1796 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 1797 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
1798 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1799 else
1800 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1801 }
1802
1803 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 1804 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
3deb8167 1805 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
9f6c9258 1806 bp->port.pmf = 1;
3deb8167
YR
1807 /*
1808 * We need the barrier to ensure the ordering between the
1809 * writing to bp->port.pmf here and reading it from the
1810 * bnx2x_periodic_task().
1811 */
1812 smp_mb();
1813 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1814 } else
9f6c9258 1815 bp->port.pmf = 0;
6383c0b3 1816
9f6c9258
DK
1817 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1818
619c5cb6
VZ
1819 /* Init Function state controlling object */
1820 bnx2x__init_func_obj(bp);
1821
9f6c9258
DK
1822 /* Initialize HW */
1823 rc = bnx2x_init_hw(bp, load_code);
1824 if (rc) {
1825 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 1826 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1827 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
1828 }
1829
d6214d7a
DK
1830 /* Connect to IRQs */
1831 rc = bnx2x_setup_irqs(bp);
523224a3
DK
1832 if (rc) {
1833 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1834 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
1835 }
1836
9f6c9258
DK
1837 /* Setup NIC internals and enable interrupts */
1838 bnx2x_nic_init(bp, load_code);
1839
619c5cb6
VZ
1840 /* Init per-function objects */
1841 bnx2x_init_bp_objs(bp);
1842
f2e0899f
DK
1843 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1844 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
1845 (bp->common.shmem2_base)) {
1846 if (SHMEM2_HAS(bp, dcc_support))
1847 SHMEM2_WR(bp, dcc_support,
1848 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1849 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1850 }
1851
1852 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1853 rc = bnx2x_func_start(bp);
1854 if (rc) {
1855 BNX2X_ERR("Function start failed!\n");
c636322b 1856 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6
VZ
1857 LOAD_ERROR_EXIT(bp, load_error3);
1858 }
9f6c9258
DK
1859
1860 /* Send LOAD_DONE command to MCP */
1861 if (!BP_NOMCP(bp)) {
a22f0788 1862 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1863 if (!load_code) {
1864 BNX2X_ERR("MCP response failure, aborting\n");
1865 rc = -EBUSY;
619c5cb6 1866 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
1867 }
1868 }
1869
619c5cb6 1870 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
1871 if (rc) {
1872 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 1873 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 1874 }
9f6c9258 1875
9f6c9258 1876#ifdef BCM_CNIC
523224a3 1877 /* Enable Timer scan */
619c5cb6 1878 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
9f6c9258 1879#endif
f85582f8 1880
523224a3 1881 for_each_nondefault_queue(bp, i) {
619c5cb6 1882 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
523224a3 1883 if (rc)
619c5cb6 1884 LOAD_ERROR_EXIT(bp, load_error4);
523224a3
DK
1885 }
1886
619c5cb6
VZ
1887 rc = bnx2x_init_rss_pf(bp);
1888 if (rc)
1889 LOAD_ERROR_EXIT(bp, load_error4);
1890
523224a3
DK
1891 /* Now that the clients are configured we are ready to work */
1892 bp->state = BNX2X_STATE_OPEN;
1893
619c5cb6
VZ
1894 /* Configure a ucast MAC */
1895 rc = bnx2x_set_eth_mac(bp, true);
1896 if (rc)
1897 LOAD_ERROR_EXIT(bp, load_error4);
6e30dd4e 1898
e3835b99
DK
1899 if (bp->pending_max) {
1900 bnx2x_update_max_mf_config(bp, bp->pending_max);
1901 bp->pending_max = 0;
1902 }
1903
9f6c9258
DK
1904 if (bp->port.pmf)
1905 bnx2x_initial_phy_init(bp, load_mode);
1906
619c5cb6
VZ
1907 /* Start fast path */
1908
1909 /* Initialize Rx filter. */
1910 netif_addr_lock_bh(bp->dev);
6e30dd4e 1911 bnx2x_set_rx_mode(bp->dev);
619c5cb6 1912 netif_addr_unlock_bh(bp->dev);
6e30dd4e 1913
619c5cb6 1914 /* Start the Tx */
9f6c9258
DK
1915 switch (load_mode) {
1916 case LOAD_NORMAL:
523224a3
DK
1917 /* Tx queues only need to be re-enabled */
1918 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1919 break;
1920
1921 case LOAD_OPEN:
1922 netif_tx_start_all_queues(bp->dev);
523224a3 1923 smp_mb__after_clear_bit();
9f6c9258
DK
1924 break;
1925
1926 case LOAD_DIAG:
9f6c9258
DK
1927 bp->state = BNX2X_STATE_DIAG;
1928 break;
1929
1930 default:
1931 break;
1932 }
1933
00253a8c
DK
1934 if (bp->port.pmf)
1935 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
1936 else
9f6c9258
DK
1937 bnx2x__link_status_update(bp);
1938
1939 /* start the timer */
1940 mod_timer(&bp->timer, jiffies + bp->current_interval);
1941
1942#ifdef BCM_CNIC
b306f5ed
DK
1943 /* re-read iscsi info */
1944 bnx2x_get_iscsi_info(bp);
9f6c9258
DK
1945 bnx2x_setup_cnic_irq_info(bp);
1946 if (bp->state == BNX2X_STATE_OPEN)
1947 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1948#endif
9f6c9258 1949
619c5cb6
VZ
1950 /* Wait for all pending SP commands to complete */
1951 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1952 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1953 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1954 return -EBUSY;
1955 }
6891dd25 1956
619c5cb6 1957 bnx2x_dcbx_init(bp);
9f6c9258
DK
1958 return 0;
1959
619c5cb6 1960#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 1961load_error4:
619c5cb6 1962#ifdef BCM_CNIC
9f6c9258 1963 /* Disable Timer scan */
619c5cb6 1964 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9f6c9258
DK
1965#endif
1966load_error3:
1967 bnx2x_int_disable_sync(bp, 1);
d6214d7a 1968
619c5cb6
VZ
1969 /* Clean queueable objects */
1970 bnx2x_squeeze_objects(bp);
1971
9f6c9258
DK
1972 /* Free SKBs, SGEs, TPA pool and driver internals */
1973 bnx2x_free_skbs(bp);
ec6ba945 1974 for_each_rx_queue(bp, i)
9f6c9258 1975 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 1976
9f6c9258 1977 /* Release IRQs */
d6214d7a
DK
1978 bnx2x_free_irq(bp);
1979load_error2:
1980 if (!BP_NOMCP(bp)) {
1981 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1982 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1983 }
1984
1985 bp->port.pmf = 0;
9f6c9258
DK
1986load_error1:
1987 bnx2x_napi_disable(bp);
889b9af3
AE
1988 /* clear pf_load status, as it was already set */
1989 bnx2x_clear_pf_load(bp);
d6214d7a 1990load_error0:
9f6c9258
DK
1991 bnx2x_free_mem(bp);
1992
1993 return rc;
619c5cb6 1994#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
1995}
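/* Editor's illustrative sketch (not part of the driver): in the NO-MCP
 * branch of bnx2x_nic_load() above, the LOAD response code is derived
 * purely from the per-path counters in load_count[path].  A minimal,
 * hypothetical model of that decision is shown below; "cnt" stands for
 * load_count[BP_PATH(bp)] and the FW_MSG_CODE_* values are the ones the
 * MCP would normally return.
 */
static inline u32 bnx2x_nomcp_load_code_sketch(int *cnt, int port)
{
	cnt[0]++;			/* functions loaded on this path */
	cnt[1 + port]++;		/* functions loaded on this port */

	if (cnt[0] == 1)		/* first function on the path */
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (cnt[1 + port] == 1)		/* first function on the port */
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}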
1996
1997/* must be called with rtnl_lock */
1998int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1999{
2000 int i;
c9ee9206
VZ
2001 bool global = false;
2002
2003 if ((bp->state == BNX2X_STATE_CLOSED) ||
2004 (bp->state == BNX2X_STATE_ERROR)) {
2005 /* We can get here if the driver has been unloaded
2006 * during parity error recovery and is either waiting for a
2007 * leader to complete or for other functions to unload and
2008 * then ifdown has been issued. In this case we want to
2009 * unload and let the other functions complete a recovery
2010 * process.
2011 */
9f6c9258
DK
2012 bp->recovery_state = BNX2X_RECOVERY_DONE;
2013 bp->is_leader = 0;
c9ee9206
VZ
2014 bnx2x_release_leader_lock(bp);
2015 smp_mb();
2016
2017 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
9f6c9258
DK
2018
2019 return -EINVAL;
2020 }
2021
87b7ba3d
VZ
2022 /*
2023 * It's important to set the bp->state to a value different from
2024 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2025 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2026 */
2027 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2028 smp_mb();
2029
9505ee37
VZ
2030 /* Stop Tx */
2031 bnx2x_tx_disable(bp);
2032
9f6c9258
DK
2033#ifdef BCM_CNIC
2034 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2035#endif
9f6c9258 2036
9f6c9258 2037 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2038
9f6c9258 2039 del_timer_sync(&bp->timer);
f85582f8 2040
619c5cb6
VZ
2041 /* Set ALWAYS_ALIVE bit in shmem */
2042 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2043
2044 bnx2x_drv_pulse(bp);
9f6c9258 2045
f85582f8 2046 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1355b704 2047 bnx2x_save_statistics(bp);
9f6c9258
DK
2048
2049 /* Cleanup the chip if needed */
2050 if (unload_mode != UNLOAD_RECOVERY)
2051 bnx2x_chip_cleanup(bp, unload_mode);
523224a3 2052 else {
c9ee9206
VZ
2053 /* Send the UNLOAD_REQUEST to the MCP */
2054 bnx2x_send_unload_req(bp, unload_mode);
2055
2056 /*
2057 * In case of a global attention, prevent transactions to the host
2058 * from the functions on the engine that doesn't reset the global
2059 * blocks, once the global blocks are reset and the gates are
2060 * opened (i.e. the engine whose leader will perform the recovery
2061 * last).
2062 */
2063 if (!CHIP_IS_E1x(bp))
2064 bnx2x_pf_disable(bp);
2065
2066 /* Disable HW interrupts, NAPI */
523224a3
DK
2067 bnx2x_netif_stop(bp, 1);
2068
2069 /* Release IRQs */
d6214d7a 2070 bnx2x_free_irq(bp);
c9ee9206
VZ
2071
2072 /* Report UNLOAD_DONE to MCP */
2073 bnx2x_send_unload_done(bp);
523224a3 2074 }
9f6c9258 2075
619c5cb6
VZ
2076 /*
2077 * At this stage no more interrupts will arrive so we may safely clean
2078 * the queueable objects here in case they failed to get cleaned so far.
2079 */
2080 bnx2x_squeeze_objects(bp);
2081
79616895
VZ
2082 /* There should be no more pending SP commands at this stage */
2083 bp->sp_state = 0;
2084
9f6c9258
DK
2085 bp->port.pmf = 0;
2086
2087 /* Free SKBs, SGEs, TPA pool and driver internals */
2088 bnx2x_free_skbs(bp);
ec6ba945 2089 for_each_rx_queue(bp, i)
9f6c9258 2090 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2091
9f6c9258
DK
2092 bnx2x_free_mem(bp);
2093
2094 bp->state = BNX2X_STATE_CLOSED;
2095
c9ee9206
VZ
2096 /* Check if there are pending parity attentions. If there are - set
2097 * RECOVERY_IN_PROGRESS.
2098 */
2099 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2100 bnx2x_set_reset_in_progress(bp);
2101
2102 /* Set RESET_IS_GLOBAL if needed */
2103 if (global)
2104 bnx2x_set_reset_global(bp);
2105 }
2106
2107
9f6c9258
DK
2108 /* The last driver must disable the "close the gate" functionality
2109 * if there is no parity attention or "process kill" pending.
2110 */
889b9af3 2111 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2112 bnx2x_disable_close_the_gate(bp);
2113
9f6c9258
DK
2114 return 0;
2115}
f85582f8 2116
9f6c9258
DK
2117int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2118{
2119 u16 pmcsr;
2120
adf5f6a1
DK
2121 /* If there is no power capability, silently succeed */
2122 if (!bp->pm_cap) {
2123 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2124 return 0;
2125 }
2126
9f6c9258
DK
2127 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2128
2129 switch (state) {
2130 case PCI_D0:
2131 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2132 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2133 PCI_PM_CTRL_PME_STATUS));
2134
2135 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2136 /* delay required during transition out of D3hot */
2137 msleep(20);
2138 break;
2139
2140 case PCI_D3hot:
2141 /* If there are other clients above, don't
2142 shut down the power */
2143 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2144 return 0;
2145 /* Don't shut down the power for emulation and FPGA */
2146 if (CHIP_REV_IS_SLOW(bp))
2147 return 0;
2148
2149 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2150 pmcsr |= 3;
2151
2152 if (bp->wol)
2153 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2154
2155 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2156 pmcsr);
2157
2158 /* No more memory access after this point until
2159 * device is brought back to D0.
2160 */
2161 break;
2162
2163 default:
2164 return -EINVAL;
2165 }
2166 return 0;
2167}
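/* Editor's illustrative sketch (not part of the driver): the D3hot branch
 * of bnx2x_set_power_state() above only touches two PMCSR fields - the
 * power-state bits and PME_ENABLE.  A hypothetical helper that models
 * just that register update (it does not touch the device):
 */
static inline u16 bnx2x_pmcsr_d3hot_sketch(u16 pmcsr, bool wol)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear the current D-state */
	pmcsr |= 3;				/* request D3hot */

	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;	/* arm wake-on-LAN */

	return pmcsr;
}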
2168
9f6c9258
DK
2169/*
2170 * net_device service functions
2171 */
d6214d7a 2172int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2173{
2174 int work_done = 0;
6383c0b3 2175 u8 cos;
9f6c9258
DK
2176 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2177 napi);
2178 struct bnx2x *bp = fp->bp;
2179
2180 while (1) {
2181#ifdef BNX2X_STOP_ON_ERROR
2182 if (unlikely(bp->panic)) {
2183 napi_complete(napi);
2184 return 0;
2185 }
2186#endif
2187
6383c0b3
AE
2188 for_each_cos_in_tx_queue(fp, cos)
2189 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2190 bnx2x_tx_int(bp, &fp->txdata[cos]);
2191
9f6c9258
DK
2192
2193 if (bnx2x_has_rx_work(fp)) {
2194 work_done += bnx2x_rx_int(fp, budget - work_done);
2195
2196 /* must not complete if we consumed full budget */
2197 if (work_done >= budget)
2198 break;
2199 }
2200
2201 /* Fall out from the NAPI loop if needed */
2202 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
2203#ifdef BCM_CNIC
2204 /* No need to update SB for FCoE L2 ring as long as
2205 * it's connected to the default SB and the SB
2206 * has been updated when NAPI was scheduled.
2207 */
2208 if (IS_FCOE_FP(fp)) {
2209 napi_complete(napi);
2210 break;
2211 }
2212#endif
2213
9f6c9258 2214 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2215 /* bnx2x_has_rx_work() reads the status block,
2216 * thus we need to ensure that status block indices
2217 * have been actually read (bnx2x_update_fpsb_idx)
2218 * prior to this check (bnx2x_has_rx_work) so that
2219 * we won't write the "newer" value of the status block
2220 * to IGU (if there was a DMA right after
2221 * bnx2x_has_rx_work and if there is no rmb, the memory
2222 * reading (bnx2x_update_fpsb_idx) may be postponed
2223 * to right before bnx2x_ack_sb). In this case there
2224 * will never be another interrupt until there is
2225 * another update of the status block, while there
2226 * is still unhandled work.
2227 */
9f6c9258
DK
2228 rmb();
2229
2230 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2231 napi_complete(napi);
2232 /* Re-enable interrupts */
523224a3
DK
2233 DP(NETIF_MSG_HW,
2234 "Update index to %d\n", fp->fp_hc_idx);
2235 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2236 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2237 IGU_INT_ENABLE, 1);
2238 break;
2239 }
2240 }
2241 }
2242
2243 return work_done;
2244}
2245
9f6c9258
DK
2246/* we split the first BD into headers and data BDs
2247 * to ease the pain of our fellow microcode engineers
2248 * we use one mapping for both BDs
2249 * So far this has only been observed to happen
2250 * in Other Operating Systems(TM)
2251 */
2252static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 2253 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
2254 struct sw_tx_bd *tx_buf,
2255 struct eth_tx_start_bd **tx_bd, u16 hlen,
2256 u16 bd_prod, int nbd)
2257{
2258 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2259 struct eth_tx_bd *d_tx_bd;
2260 dma_addr_t mapping;
2261 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2262
2263 /* first fix first BD */
2264 h_tx_bd->nbd = cpu_to_le16(nbd);
2265 h_tx_bd->nbytes = cpu_to_le16(hlen);
2266
2267 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2268 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2269 h_tx_bd->addr_lo, h_tx_bd->nbd);
2270
2271 /* now get a new data BD
2272 * (after the pbd) and fill it */
2273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2274 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
2275
2276 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2277 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2278
2279 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2280 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2281 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2282
2283 /* this marks the BD as one that has no individual mapping */
2284 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2285
2286 DP(NETIF_MSG_TX_QUEUED,
2287 "TSO split data size is %d (%x:%x)\n",
2288 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2289
2290 /* update tx_bd */
2291 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2292
2293 return bd_prod;
2294}
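/* Editor's illustrative sketch (not part of the driver): bnx2x_tx_split()
 * above reuses the original DMA mapping for both the header BD and the
 * new data BD.  The address/length arithmetic reduces to the hypothetical
 * helper below - the data BD starts hlen bytes into the same mapping and
 * carries the remaining bytes.
 */
static inline void bnx2x_tx_split_math_sketch(dma_addr_t mapping, u16 old_len,
					      u16 hlen, dma_addr_t *data_addr,
					      u16 *data_len)
{
	*data_addr = mapping + hlen;	/* same mapping, offset past the headers */
	*data_len = old_len - hlen;	/* payload carried by the data BD */
}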
2295
2296static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2297{
2298 if (fix > 0)
2299 csum = (u16) ~csum_fold(csum_sub(csum,
2300 csum_partial(t_header - fix, fix, 0)));
2301
2302 else if (fix < 0)
2303 csum = (u16) ~csum_fold(csum_add(csum,
2304 csum_partial(t_header, -fix, 0)));
2305
2306 return swab16(csum);
2307}
2308
2309static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2310{
2311 u32 rc;
2312
2313 if (skb->ip_summed != CHECKSUM_PARTIAL)
2314 rc = XMIT_PLAIN;
2315
2316 else {
d0d9d8ef 2317 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2318 rc = XMIT_CSUM_V6;
2319 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2320 rc |= XMIT_CSUM_TCP;
2321
2322 } else {
2323 rc = XMIT_CSUM_V4;
2324 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2325 rc |= XMIT_CSUM_TCP;
2326 }
2327 }
2328
5892b9e9
VZ
2329 if (skb_is_gso_v6(skb))
2330 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2331 else if (skb_is_gso(skb))
2332 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2333
2334 return rc;
2335}
2336
2337#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2338/* check if the packet requires linearization (i.e. it is too fragmented);
2339 no need to check fragmentation if page size > 8K (there will be no
2340 violation of FW restrictions) */
2341static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2342 u32 xmit_type)
2343{
2344 int to_copy = 0;
2345 int hlen = 0;
2346 int first_bd_sz = 0;
2347
2348 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2349 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2350
2351 if (xmit_type & XMIT_GSO) {
2352 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2353 /* Check if LSO packet needs to be copied:
2354 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2355 int wnd_size = MAX_FETCH_BD - 3;
2356 /* Number of windows to check */
2357 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2358 int wnd_idx = 0;
2359 int frag_idx = 0;
2360 u32 wnd_sum = 0;
2361
2362 /* Headers length */
2363 hlen = (int)(skb_transport_header(skb) - skb->data) +
2364 tcp_hdrlen(skb);
2365
2366 /* Amount of data (w/o headers) on linear part of SKB */
2367 first_bd_sz = skb_headlen(skb) - hlen;
2368
2369 wnd_sum = first_bd_sz;
2370
2371 /* Calculate the first sum - it's special */
2372 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2373 wnd_sum +=
9e903e08 2374 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
2375
2376 /* If there was data on linear skb data - check it */
2377 if (first_bd_sz > 0) {
2378 if (unlikely(wnd_sum < lso_mss)) {
2379 to_copy = 1;
2380 goto exit_lbl;
2381 }
2382
2383 wnd_sum -= first_bd_sz;
2384 }
2385
2386 /* Others are easier: run through the frag list and
2387 check all windows */
2388 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2389 wnd_sum +=
9e903e08 2390 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
2391
2392 if (unlikely(wnd_sum < lso_mss)) {
2393 to_copy = 1;
2394 break;
2395 }
2396 wnd_sum -=
9e903e08 2397 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
2398 }
2399 } else {
2400 /* in the non-LSO case, a too fragmented packet should
2401 always be linearized */
2402 to_copy = 1;
2403 }
2404 }
2405
2406exit_lbl:
2407 if (unlikely(to_copy))
2408 DP(NETIF_MSG_TX_QUEUED,
2409 "Linearization IS REQUIRED for %s packet. "
2410 "num_frags %d hlen %d first_bd_sz %d\n",
2411 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2412 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2413
2414 return to_copy;
2415}
2416#endif
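/* Editor's illustrative sketch (not part of the driver): the LSO check in
 * bnx2x_pkt_req_lin() above slides a window of wnd_size consecutive BDs
 * over the fragment sizes and requires every window to cover at least one
 * MSS.  The same test over a plain array of fragment sizes (hypothetical
 * helper, assuming nr_frags >= wnd_size as the caller guarantees):
 */
static inline int bnx2x_lso_wnd_check_sketch(const unsigned int *frag_sz,
					     int nr_frags,
					     unsigned int first_bd_sz,
					     unsigned int mss, int wnd_size)
{
	unsigned int wnd_sum = first_bd_sz;
	int i;

	/* first window: linear payload + (wnd_size - 1) fragments */
	for (i = 0; i < wnd_size - 1; i++)
		wnd_sum += frag_sz[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < mss)
			return 1;		/* needs skb_linearize() */
		wnd_sum -= first_bd_sz;
	}

	/* remaining windows: wnd_size consecutive fragments each */
	for (i = 0; i <= nr_frags - wnd_size; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < mss)
			return 1;		/* needs skb_linearize() */
		wnd_sum -= frag_sz[i];
	}

	return 0;				/* every window covers an MSS */
}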
2417
2297a2da
VZ
2418static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2419 u32 xmit_type)
f2e0899f 2420{
2297a2da
VZ
2421 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2422 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2423 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2424 if ((xmit_type & XMIT_GSO_V6) &&
2425 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2426 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2427}
2428
2429/**
e8920674 2430 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2431 *
e8920674
DK
2432 * @skb: packet skb
2433 * @pbd: parse BD
2434 * @xmit_type: xmit flags
f2e0899f
DK
2435 */
2436static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2437 struct eth_tx_parse_bd_e1x *pbd,
2438 u32 xmit_type)
2439{
2440 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2441 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2442 pbd->tcp_flags = pbd_tcp_flags(skb);
2443
2444 if (xmit_type & XMIT_GSO_V4) {
2445 pbd->ip_id = swab16(ip_hdr(skb)->id);
2446 pbd->tcp_pseudo_csum =
2447 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2448 ip_hdr(skb)->daddr,
2449 0, IPPROTO_TCP, 0));
2450
2451 } else
2452 pbd->tcp_pseudo_csum =
2453 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2454 &ipv6_hdr(skb)->daddr,
2455 0, IPPROTO_TCP, 0));
2456
2457 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2458}
f85582f8 2459
f2e0899f 2460/**
e8920674 2461 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2462 *
e8920674
DK
2463 * @bp: driver handle
2464 * @skb: packet skb
2465 * @parsing_data: data to be updated
2466 * @xmit_type: xmit flags
f2e0899f 2467 *
e8920674 2468 * 57712 related
f2e0899f
DK
2469 */
2470static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2471 u32 *parsing_data, u32 xmit_type)
f2e0899f 2472{
e39aece7
VZ
2473 *parsing_data |=
2474 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2475 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2476 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2477
e39aece7
VZ
2478 if (xmit_type & XMIT_CSUM_TCP) {
2479 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2480 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2481 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2482
e39aece7
VZ
2483 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2484 } else
2485 /* We support checksum offload for TCP and UDP only.
2486 * No need to pass the UDP header length - it's a constant.
2487 */
2488 return skb_transport_header(skb) +
2489 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2490}
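/* Editor's illustrative sketch (not part of the driver): the E2 parsing-BD
 * fields filled in above are expressed in hardware units - the L4 header
 * start in 16-bit words and the TCP header length in 32-bit dwords.  A
 * hypothetical helper making those two conversions explicit:
 */
static inline u32 bnx2x_pbd_e2_l4_fields_sketch(unsigned int l4_off_bytes,
						unsigned int tcp_hdrlen_bytes)
{
	u32 parsing_data = 0;

	/* start of the TCP/UDP header, counted in 16-bit words */
	parsing_data |= ((l4_off_bytes >> 1) <<
			 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	/* TCP header length, counted in 32-bit dwords */
	parsing_data |= ((tcp_hdrlen_bytes / 4) <<
			 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

	return parsing_data;
}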
2491
93ef5c02
DK
2492static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2493 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2494{
93ef5c02
DK
2495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2496
2497 if (xmit_type & XMIT_CSUM_V4)
2498 tx_start_bd->bd_flags.as_bitfield |=
2499 ETH_TX_BD_FLAGS_IP_CSUM;
2500 else
2501 tx_start_bd->bd_flags.as_bitfield |=
2502 ETH_TX_BD_FLAGS_IPV6;
2503
2504 if (!(xmit_type & XMIT_CSUM_TCP))
2505 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2506}
2507
f2e0899f 2508/**
e8920674 2509 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2510 *
e8920674
DK
2511 * @bp: driver handle
2512 * @skb: packet skb
2513 * @pbd: parse BD to be updated
2514 * @xmit_type: xmit flags
f2e0899f
DK
2515 */
2516static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2517 struct eth_tx_parse_bd_e1x *pbd,
2518 u32 xmit_type)
2519{
e39aece7 2520 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2521
2522 /* for now NS flag is not used in Linux */
2523 pbd->global_data =
2524 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2525 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2526
2527 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2528 skb_network_header(skb)) >> 1;
f2e0899f 2529
e39aece7
VZ
2530 hlen += pbd->ip_hlen_w;
2531
2532 /* We support checksum offload for TCP and UDP only */
2533 if (xmit_type & XMIT_CSUM_TCP)
2534 hlen += tcp_hdrlen(skb) / 2;
2535 else
2536 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2537
2538 pbd->total_hlen_w = cpu_to_le16(hlen);
2539 hlen = hlen*2;
2540
2541 if (xmit_type & XMIT_CSUM_TCP) {
2542 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2543
2544 } else {
2545 s8 fix = SKB_CS_OFF(skb); /* signed! */
2546
2547 DP(NETIF_MSG_TX_QUEUED,
2548 "hlen %d fix %d csum before fix %x\n",
2549 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2550
2551 /* HW bug: fixup the CSUM */
2552 pbd->tcp_pseudo_csum =
2553 bnx2x_csum_fix(skb_transport_header(skb),
2554 SKB_CS(skb), fix);
2555
2556 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2557 pbd->tcp_pseudo_csum);
2558 }
2559
2560 return hlen;
2561}
f85582f8 2562
9f6c9258
DK
2563/* called with netif_tx_lock
2564 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2565 * netif_wake_queue()
2566 */
2567netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2568{
2569 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 2570
9f6c9258
DK
2571 struct bnx2x_fastpath *fp;
2572 struct netdev_queue *txq;
6383c0b3 2573 struct bnx2x_fp_txdata *txdata;
9f6c9258 2574 struct sw_tx_bd *tx_buf;
619c5cb6 2575 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 2576 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2577 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2578 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2579 u32 pbd_e2_parsing_data = 0;
9f6c9258 2580 u16 pkt_prod, bd_prod;
6383c0b3 2581 int nbd, txq_index, fp_index, txdata_index;
9f6c9258
DK
2582 dma_addr_t mapping;
2583 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2584 int i;
2585 u8 hlen = 0;
2586 __le16 pkt_size = 0;
2587 struct ethhdr *eth;
2588 u8 mac_type = UNICAST_ADDRESS;
2589
2590#ifdef BNX2X_STOP_ON_ERROR
2591 if (unlikely(bp->panic))
2592 return NETDEV_TX_BUSY;
2593#endif
2594
6383c0b3
AE
2595 txq_index = skb_get_queue_mapping(skb);
2596 txq = netdev_get_tx_queue(dev, txq_index);
2597
2598 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2599
2600 /* decode the fastpath index and the cos index from the txq */
2601 fp_index = TXQ_TO_FP(txq_index);
2602 txdata_index = TXQ_TO_COS(txq_index);
2603
2604#ifdef BCM_CNIC
2605 /*
2606 * Override the above for the FCoE queue:
2607 * - FCoE fp entry is right after the ETH entries.
2608 * - FCoE L2 queue uses bp->txdata[0] only.
2609 */
2610 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2611 bnx2x_fcoe_tx(bp, txq_index)))) {
2612 fp_index = FCOE_IDX;
2613 txdata_index = 0;
2614 }
2615#endif
2616
2617 /* enable this debug print to view the transmission queue being used
94f05b0f 2618 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 2619 txq_index, fp_index, txdata_index); */
9f6c9258 2620
6383c0b3 2621 /* locate the fastpath and the txdata */
9f6c9258 2622 fp = &bp->fp[fp_index];
6383c0b3
AE
2623 txdata = &fp->txdata[txdata_index];
2624
2625 /* enable this debug print to view the transmission details
2626 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
94f05b0f 2627 " tx_data ptr %p fp pointer %p\n",
6383c0b3 2628 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 2629
6383c0b3
AE
2630 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2631 (skb_shinfo(skb)->nr_frags + 3))) {
9f6c9258
DK
2632 fp->eth_q_stats.driver_xoff++;
2633 netif_tx_stop_queue(txq);
2634 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2635 return NETDEV_TX_BUSY;
2636 }
2637
f2e0899f
DK
2638 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2639 "protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 2640 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2641 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2642
2643 eth = (struct ethhdr *)skb->data;
2644
2645 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2646 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2647 if (is_broadcast_ether_addr(eth->h_dest))
2648 mac_type = BROADCAST_ADDRESS;
2649 else
2650 mac_type = MULTICAST_ADDRESS;
2651 }
2652
2653#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2654 /* First, check if we need to linearize the skb (due to FW
2655 restrictions). No need to check fragmentation if page size > 8K
2656 (there will be no violation to FW restrictions) */
2657 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2658 /* Statistics of linearization */
2659 bp->lin_cnt++;
2660 if (skb_linearize(skb) != 0) {
2661 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2662 "silently dropping this SKB\n");
2663 dev_kfree_skb_any(skb);
2664 return NETDEV_TX_OK;
2665 }
2666 }
2667#endif
619c5cb6
VZ
2668 /* Map skb linear data for DMA */
2669 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2670 skb_headlen(skb), DMA_TO_DEVICE);
2671 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2672 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2673 "silently dropping this SKB\n");
2674 dev_kfree_skb_any(skb);
2675 return NETDEV_TX_OK;
2676 }
9f6c9258
DK
2677 /*
2678 Please read carefully. First we use one BD which we mark as start,
2679 then we have a parsing info BD (used for TSO or xsum),
2680 and only then we have the rest of the TSO BDs.
2681 (don't forget to mark the last one as last,
2682 and to unmap only AFTER you write to the BD ...)
2683 And above all, all pbd sizes are in words - NOT DWORDS!
2684 */
2685
619c5cb6
VZ
2686 /* get current pkt produced now - advance it just before sending packet
2687 * since mapping of pages may fail and cause packet to be dropped
2688 */
6383c0b3
AE
2689 pkt_prod = txdata->tx_pkt_prod;
2690 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 2691
619c5cb6
VZ
2692 /* get a tx_buf and first BD
2693 * tx_start_bd may be changed during SPLIT,
2694 * but first_bd will always stay first
2695 */
6383c0b3
AE
2696 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2697 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 2698 first_bd = tx_start_bd;
9f6c9258
DK
2699
2700 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2701 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2702 mac_type);
2703
9f6c9258 2704 /* header nbd */
f85582f8 2705 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2706
2707 /* remember the first BD of the packet */
6383c0b3 2708 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
2709 tx_buf->skb = skb;
2710 tx_buf->flags = 0;
2711
2712 DP(NETIF_MSG_TX_QUEUED,
2713 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 2714 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 2715
eab6d18d 2716 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2717 tx_start_bd->vlan_or_ethertype =
2718 cpu_to_le16(vlan_tx_tag_get(skb));
2719 tx_start_bd->bd_flags.as_bitfield |=
2720 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2721 } else
523224a3 2722 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2723
2724 /* turn on parsing and get a BD */
2725 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2726
93ef5c02
DK
2727 if (xmit_type & XMIT_CSUM)
2728 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 2729
619c5cb6 2730 if (!CHIP_IS_E1x(bp)) {
6383c0b3 2731 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
2732 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2733 /* Set PBD in checksum offload case */
2734 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2735 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2736 &pbd_e2_parsing_data,
2737 xmit_type);
619c5cb6
VZ
2738 if (IS_MF_SI(bp)) {
2739 /*
2740 * fill in the MAC addresses in the PBD - for local
2741 * switching
2742 */
2743 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2744 &pbd_e2->src_mac_addr_mid,
2745 &pbd_e2->src_mac_addr_lo,
2746 eth->h_source);
2747 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2748 &pbd_e2->dst_mac_addr_mid,
2749 &pbd_e2->dst_mac_addr_lo,
2750 eth->h_dest);
2751 }
f2e0899f 2752 } else {
6383c0b3 2753 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
2754 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2755 /* Set PBD in checksum offload case */
2756 if (xmit_type & XMIT_CSUM)
2757 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2758
9f6c9258
DK
2759 }
2760
f85582f8 2761 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2762 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2763 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 2764 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
2765 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2766 pkt_size = tx_start_bd->nbytes;
2767
2768 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2769 " nbytes %d flags %x vlan %x\n",
2770 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2771 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2772 tx_start_bd->bd_flags.as_bitfield,
2773 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2774
2775 if (xmit_type & XMIT_GSO) {
2776
2777 DP(NETIF_MSG_TX_QUEUED,
2778 "TSO packet len %d hlen %d total len %d tso size %d\n",
2779 skb->len, hlen, skb_headlen(skb),
2780 skb_shinfo(skb)->gso_size);
2781
2782 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2783
2784 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
2785 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2786 &tx_start_bd, hlen,
2787 bd_prod, ++nbd);
619c5cb6 2788 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
2789 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2790 xmit_type);
f2e0899f
DK
2791 else
2792 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2793 }
2297a2da
VZ
2794
2795 /* Set the PBD's parsing_data field if not zero
2796 * (for the chips newer than 57711).
2797 */
2798 if (pbd_e2_parsing_data)
2799 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2800
9f6c9258
DK
2801 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2802
f85582f8 2803 /* Handle fragmented skb */
9f6c9258
DK
2804 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2805 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2806
9e903e08
ED
2807 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2808 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 2809 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 2810 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6
VZ
2811
2812 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2813 "dropping packet...\n");
2814
2815 /* we need to unmap all buffers already mapped
2816 * for this SKB;
2817 * first_bd->nbd needs to be properly updated
2818 * before the call to bnx2x_free_tx_pkt
2819 */
2820 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 2821 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
2822 TX_BD(txdata->tx_pkt_prod),
2823 &pkts_compl, &bytes_compl);
619c5cb6
VZ
2824 return NETDEV_TX_OK;
2825 }
2826
9f6c9258 2827 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2828 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2829 if (total_pkt_bd == NULL)
6383c0b3 2830 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2831
9f6c9258
DK
2832 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2833 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
2834 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2835 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 2836 nbd++;
9f6c9258
DK
2837
2838 DP(NETIF_MSG_TX_QUEUED,
2839 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2840 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2841 le16_to_cpu(tx_data_bd->nbytes));
2842 }
2843
2844 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2845
619c5cb6
VZ
2846 /* update with actual num BDs */
2847 first_bd->nbd = cpu_to_le16(nbd);
2848
9f6c9258
DK
2849 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2850
2851 /* now send a tx doorbell, counting the next BD
2852 * if the packet contains or ends with it
2853 */
2854 if (TX_BD_POFF(bd_prod) < nbd)
2855 nbd++;
2856
619c5cb6
VZ
2857 /* total_pkt_bytes should be set on the first data BD if
2858 * it's not an LSO packet and there is more than one
2859 * data BD. In this case pkt_size is limited by an MTU value.
2860 * However we prefer to set it for an LSO packet (while we don't
2861 * have to) in order to save some CPU cycles in the non-LSO
2862 * case, where we care much more about them.
2863 */
9f6c9258
DK
2864 if (total_pkt_bd != NULL)
2865 total_pkt_bd->total_pkt_bytes = pkt_size;
2866
523224a3 2867 if (pbd_e1x)
9f6c9258 2868 DP(NETIF_MSG_TX_QUEUED,
523224a3 2869 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2870 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2871 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2872 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2873 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2874 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2875 if (pbd_e2)
2876 DP(NETIF_MSG_TX_QUEUED,
2877 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2878 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2879 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2880 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2881 pbd_e2->parsing_data);
9f6c9258
DK
2882 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2883
2df1a70a
TH
2884 netdev_tx_sent_queue(txq, skb->len);
2885
6383c0b3 2886 txdata->tx_pkt_prod++;
9f6c9258
DK
2887 /*
2888 * Make sure that the BD data is updated before updating the producer
2889 * since FW might read the BD right after the producer is updated.
2890 * This is only applicable for weak-ordered memory model archs such
2891 * as IA-64. The following barrier is also mandatory since the FW
2892 * assumes packets always have BDs.
2893 */
2894 wmb();
2895
6383c0b3 2896 txdata->tx_db.data.prod += nbd;
9f6c9258 2897 barrier();
f85582f8 2898
6383c0b3 2899 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
2900
2901 mmiowb();
2902
6383c0b3 2903 txdata->tx_bd_prod += nbd;
9f6c9258 2904
6383c0b3 2905 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
9f6c9258
DK
2906 netif_tx_stop_queue(txq);
2907
2908 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2909 * ordering of set_bit() in netif_tx_stop_queue() and read of
2910 * fp->bd_tx_cons */
2911 smp_mb();
2912
2913 fp->eth_q_stats.driver_xoff++;
6383c0b3 2914 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
9f6c9258
DK
2915 netif_tx_wake_queue(txq);
2916 }
6383c0b3 2917 txdata->tx_pkt++;
9f6c9258
DK
2918
2919 return NETDEV_TX_OK;
2920}
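/* Editor's illustrative sketch (not part of the driver): the BD accounting
 * in bnx2x_start_xmit() above boils down to one start BD, one parsing BD,
 * one optional extra BD when the TSO header is split off, and one BD per
 * page fragment (the extra "next page" BD the producer may skip is handled
 * separately via the TX_BD_POFF() check).  A hypothetical helper expressing
 * just that count:
 */
static inline int bnx2x_pkt_nbd_sketch(int nr_frags, bool tso_split)
{
	int nbd = 2;		/* start BD + parsing BD */

	if (tso_split)
		nbd++;		/* extra data BD created by bnx2x_tx_split() */

	return nbd + nr_frags;	/* plus one BD per page fragment */
}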
f85582f8 2921
6383c0b3
AE
2922/**
2923 * bnx2x_setup_tc - routine to configure net_device for multi tc
2924 *
2925 * @dev: net device to configure
2926 * @num_tc: number of traffic classes to enable
2927 *
2928 * callback connected to the ndo_setup_tc function pointer
2929 */
2930int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2931{
2932 int cos, prio, count, offset;
2933 struct bnx2x *bp = netdev_priv(dev);
2934
2935 /* setup tc must be called under rtnl lock */
2936 ASSERT_RTNL();
2937
2938 /* no traffic classes requested. aborting */
2939 if (!num_tc) {
2940 netdev_reset_tc(dev);
2941 return 0;
2942 }
2943
2944 /* requested to support too many traffic classes */
2945 if (num_tc > bp->max_cos) {
2946 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
94f05b0f 2947 " requested: %d. max supported is %d\n",
6383c0b3
AE
2948 num_tc, bp->max_cos);
2949 return -EINVAL;
2950 }
2951
2952 /* declare amount of supported traffic classes */
2953 if (netdev_set_num_tc(dev, num_tc)) {
94f05b0f 2954 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
6383c0b3
AE
2955 num_tc);
2956 return -EINVAL;
2957 }
2958
2959 /* configure priority to traffic class mapping */
2960 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2961 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
94f05b0f 2962 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
6383c0b3
AE
2963 prio, bp->prio_to_cos[prio]);
2964 }
2965
2966
2967 /* Use this configuration to differentiate tc0 from other COSes
2968 This can be used for ets or pfc, and save the effort of setting
2969 up a multi class queue disc or negotiating DCBX with a switch
2970 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 2971 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
2972 for (prio = 1; prio < 16; prio++) {
2973 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 2974 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
2975 } */
2976
2977 /* configure traffic class to transmission queue mapping */
2978 for (cos = 0; cos < bp->max_cos; cos++) {
2979 count = BNX2X_NUM_ETH_QUEUES(bp);
2980 offset = cos * MAX_TXQS_PER_COS;
2981 netdev_set_tc_queue(dev, cos, count, offset);
94f05b0f 2982 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
2983 cos, offset, count);
2984 }
2985
2986 return 0;
2987}
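/* Editor's illustrative sketch (not part of the driver): bnx2x_setup_tc()
 * above maps every traffic class onto its own contiguous block of
 * transmission queues.  Assuming the same MAX_TXQS_PER_COS layout, the
 * queue a given (tc, RSS queue) pair lands on is simply:
 */
static inline int bnx2x_tc_txq_sketch(int tc, int rss_queue)
{
	/* block base for this traffic class + index within the block */
	return tc * MAX_TXQS_PER_COS + rss_queue;
}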
2988
9f6c9258
DK
2989/* called with rtnl_lock */
2990int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2991{
2992 struct sockaddr *addr = p;
2993 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 2994 int rc = 0;
9f6c9258 2995
614c76df
DK
2996 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
2997 return -EINVAL;
2998
2999#ifdef BCM_CNIC
3000 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
9f6c9258 3001 return -EINVAL;
614c76df 3002#endif
9f6c9258 3003
619c5cb6
VZ
3004 if (netif_running(dev)) {
3005 rc = bnx2x_set_eth_mac(bp, false);
3006 if (rc)
3007 return rc;
3008 }
3009
9f6c9258 3010 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3011
523224a3 3012 if (netif_running(dev))
619c5cb6 3013 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3014
619c5cb6 3015 return rc;
9f6c9258
DK
3016}
3017
b3b83c3f
DK
3018static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3019{
3020 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3021 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3022 u8 cos;
b3b83c3f
DK
3023
3024 /* Common */
3025#ifdef BCM_CNIC
3026 if (IS_FCOE_IDX(fp_index)) {
3027 memset(sb, 0, sizeof(union host_hc_status_block));
3028 fp->status_blk_mapping = 0;
3029
3030 } else {
3031#endif
3032 /* status blocks */
619c5cb6 3033 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3034 BNX2X_PCI_FREE(sb->e2_sb,
3035 bnx2x_fp(bp, fp_index,
3036 status_blk_mapping),
3037 sizeof(struct host_hc_status_block_e2));
3038 else
3039 BNX2X_PCI_FREE(sb->e1x_sb,
3040 bnx2x_fp(bp, fp_index,
3041 status_blk_mapping),
3042 sizeof(struct host_hc_status_block_e1x));
3043#ifdef BCM_CNIC
3044 }
3045#endif
3046 /* Rx */
3047 if (!skip_rx_queue(bp, fp_index)) {
3048 bnx2x_free_rx_bds(fp);
3049
3050 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3051 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3052 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3053 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3054 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3055
3056 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3057 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3058 sizeof(struct eth_fast_path_rx_cqe) *
3059 NUM_RCQ_BD);
3060
3061 /* SGE ring */
3062 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3063 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3064 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3065 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3066 }
3067
3068 /* Tx */
3069 if (!skip_tx_queue(bp, fp_index)) {
3070 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3071 for_each_cos_in_tx_queue(fp, cos) {
3072 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3073
3074 DP(BNX2X_MSG_SP,
94f05b0f 3075 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3076 fp_index, cos, txdata->cid);
3077
3078 BNX2X_FREE(txdata->tx_buf_ring);
3079 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3080 txdata->tx_desc_mapping,
3081 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3082 }
b3b83c3f
DK
3083 }
3084 /* end of fastpath */
3085}
3086
3087void bnx2x_free_fp_mem(struct bnx2x *bp)
3088{
3089 int i;
3090 for_each_queue(bp, i)
3091 bnx2x_free_fp_mem_at(bp, i);
3092}
3093
3094static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3095{
3096 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3097 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3098 bnx2x_fp(bp, index, sb_index_values) =
3099 (__le16 *)status_blk.e2_sb->sb.index_values;
3100 bnx2x_fp(bp, index, sb_running_index) =
3101 (__le16 *)status_blk.e2_sb->sb.running_index;
3102 } else {
3103 bnx2x_fp(bp, index, sb_index_values) =
3104 (__le16 *)status_blk.e1x_sb->sb.index_values;
3105 bnx2x_fp(bp, index, sb_running_index) =
3106 (__le16 *)status_blk.e1x_sb->sb.running_index;
3107 }
3108}
3109
3110static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3111{
3112 union host_hc_status_block *sb;
3113 struct bnx2x_fastpath *fp = &bp->fp[index];
3114 int ring_size = 0;
6383c0b3 3115 u8 cos;
c2188952 3116 int rx_ring_size = 0;
b3b83c3f 3117
614c76df 3118#ifdef BCM_CNIC
1fdf1551 3119 if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
614c76df
DK
3120 rx_ring_size = MIN_RX_SIZE_NONTPA;
3121 bp->rx_ring_size = rx_ring_size;
3122 } else
3123#endif
c2188952 3124 if (!bp->rx_ring_size) {
b3b83c3f 3125
c2188952
VZ
3126 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3127
3128 /* allocate at least number of buffers required by FW */
3129 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3130 MIN_RX_SIZE_TPA, rx_ring_size);
3131
3132 bp->rx_ring_size = rx_ring_size;
614c76df 3133 } else /* if rx_ring_size specified - use it */
c2188952 3134 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3135
b3b83c3f
DK
3136 /* Common */
3137 sb = &bnx2x_fp(bp, index, status_blk);
3138#ifdef BCM_CNIC
3139 if (!IS_FCOE_IDX(index)) {
3140#endif
3141 /* status blocks */
619c5cb6 3142 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3143 BNX2X_PCI_ALLOC(sb->e2_sb,
3144 &bnx2x_fp(bp, index, status_blk_mapping),
3145 sizeof(struct host_hc_status_block_e2));
3146 else
3147 BNX2X_PCI_ALLOC(sb->e1x_sb,
3148 &bnx2x_fp(bp, index, status_blk_mapping),
3149 sizeof(struct host_hc_status_block_e1x));
3150#ifdef BCM_CNIC
3151 }
3152#endif
8eef2af1
DK
3153
3154 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3155 * set shortcuts for it.
3156 */
3157 if (!IS_FCOE_IDX(index))
3158 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3159
3160 /* Tx */
3161 if (!skip_tx_queue(bp, index)) {
3162 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3163 for_each_cos_in_tx_queue(fp, cos) {
3164 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3165
3166 DP(BNX2X_MSG_SP, "allocating tx memory of "
94f05b0f 3167 "fp %d cos %d\n",
6383c0b3
AE
3168 index, cos);
3169
3170 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 3171 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
3172 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3173 &txdata->tx_desc_mapping,
b3b83c3f 3174 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 3175 }
b3b83c3f
DK
3176 }
3177
3178 /* Rx */
3179 if (!skip_rx_queue(bp, index)) {
3180 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3181 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3182 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3183 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3184 &bnx2x_fp(bp, index, rx_desc_mapping),
3185 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3186
3187 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3188 &bnx2x_fp(bp, index, rx_comp_mapping),
3189 sizeof(struct eth_fast_path_rx_cqe) *
3190 NUM_RCQ_BD);
3191
3192 /* SGE ring */
3193 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3194 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3195 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3196 &bnx2x_fp(bp, index, rx_sge_mapping),
3197 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3198 /* RX BD ring */
3199 bnx2x_set_next_page_rx_bd(fp);
3200
3201 /* CQ ring */
3202 bnx2x_set_next_page_rx_cq(fp);
3203
3204 /* BDs */
3205 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3206 if (ring_size < rx_ring_size)
3207 goto alloc_mem_err;
3208 }
3209
3210 return 0;
3211
3212/* handles low memory cases */
3213alloc_mem_err:
3214 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3215 index, ring_size);
3216 /* FW will drop all packets if the queue is not big enough.
3217 * In these cases we disable the queue.
6383c0b3 3218 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
3219 */
3220 if (ring_size < (fp->disable_tpa ?
eb722d7a 3221 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
3222 /* release memory allocated for this queue */
3223 bnx2x_free_fp_mem_at(bp, index);
3224 return -ENOMEM;
3225 }
3226 return 0;
3227}
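/* Editor's illustrative sketch (not part of the driver): when no Rx ring
 * size was requested by the user, bnx2x_alloc_fp_mem_at() above sizes the
 * ring by splitting MAX_RX_AVAIL across the Rx queues and clamping to the
 * minimum the FW accepts for the TPA/non-TPA case.  A hypothetical helper:
 */
static inline int bnx2x_default_rx_ring_size_sketch(int num_rx_queues,
						    bool disable_tpa)
{
	int size = MAX_RX_AVAIL / num_rx_queues;
	int min_size = disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;

	return max_t(int, min_size, size);	/* never below the FW minimum */
}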
3228
3229int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3230{
3231 int i;
3232
3233 /**
3234 * 1. Allocate FP for leading - fatal if error
3235 * 2. {CNIC} Allocate FCoE FP - fatal if error
6383c0b3
AE
3236 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3237 * 4. Allocate RSS - fix number of queues if error
b3b83c3f
DK
3238 */
3239
3240 /* leading */
3241 if (bnx2x_alloc_fp_mem_at(bp, 0))
3242 return -ENOMEM;
6383c0b3 3243
b3b83c3f 3244#ifdef BCM_CNIC
8eef2af1
DK
3245 if (!NO_FCOE(bp))
3246 /* FCoE */
3247 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3248 /* we will fail load process instead of mark
3249 * NO_FCOE_FLAG
3250 */
3251 return -ENOMEM;
b3b83c3f 3252#endif
6383c0b3 3253
b3b83c3f
DK
3254 /* RSS */
3255 for_each_nondefault_eth_queue(bp, i)
3256 if (bnx2x_alloc_fp_mem_at(bp, i))
3257 break;
3258
3259 /* handle memory failures */
3260 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3261 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3262
3263 WARN_ON(delta < 0);
3264#ifdef BCM_CNIC
3265 /**
3266 * move non-eth FPs next to the last eth FP;
3267 * must be done in that order:
3268 * FCOE_IDX < FWD_IDX < OOO_IDX
3269 */
3270
6383c0b3 3271 /* move FCoE fp even if NO_FCOE_FLAG is on */
b3b83c3f
DK
3272 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3273#endif
3274 bp->num_queues -= delta;
3275 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3276 bp->num_queues + delta, bp->num_queues);
3277 }
3278
3279 return 0;
3280}
d6214d7a 3281
523224a3
DK
3282void bnx2x_free_mem_bp(struct bnx2x *bp)
3283{
3284 kfree(bp->fp);
3285 kfree(bp->msix_table);
3286 kfree(bp->ilt);
3287}
3288
3289int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3290{
3291 struct bnx2x_fastpath *fp;
3292 struct msix_entry *tbl;
3293 struct bnx2x_ilt *ilt;
6383c0b3
AE
3294 int msix_table_size = 0;
3295
3296 /*
3297 * The biggest MSI-X table we might need is the maximum number of
3298 * fast path IGU SBs plus the default SB (for the PF).
3299 */
3300 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3301
6383c0b3 3302 /* fp array: RSS plus CNIC related L2 queues */
01e23742 3303 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
6383c0b3 3304 sizeof(*fp), GFP_KERNEL);
523224a3
DK
3305 if (!fp)
3306 goto alloc_err;
3307 bp->fp = fp;
3308
3309 /* msix table */
01e23742 3310 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3311 if (!tbl)
3312 goto alloc_err;
3313 bp->msix_table = tbl;
3314
3315 /* ilt */
3316 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3317 if (!ilt)
3318 goto alloc_err;
3319 bp->ilt = ilt;
3320
3321 return 0;
3322alloc_err:
3323 bnx2x_free_mem_bp(bp);
3324 return -ENOMEM;
3325
3326}
3327
a9fccec7 3328int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3329{
3330 struct bnx2x *bp = netdev_priv(dev);
3331
3332 if (unlikely(!netif_running(dev)))
3333 return 0;
3334
3335 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3336 return bnx2x_nic_load(bp, LOAD_NORMAL);
3337}
3338
1ac9e428
YR
3339int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3340{
3341 u32 sel_phy_idx = 0;
3342 if (bp->link_params.num_phys <= 1)
3343 return INT_PHY;
3344
3345 if (bp->link_vars.link_up) {
3346 sel_phy_idx = EXT_PHY1;
3347 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3348 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3349 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3350 sel_phy_idx = EXT_PHY2;
3351 } else {
3352
3353 switch (bnx2x_phy_selection(&bp->link_params)) {
3354 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3355 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3356 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3357 sel_phy_idx = EXT_PHY1;
3358 break;
3359 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3360 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3361 sel_phy_idx = EXT_PHY2;
3362 break;
3363 }
3364 }
3365
3366 return sel_phy_idx;
3367
3368}
3369int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3370{
3371 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3372 /*
3373 * The selected active PHY is always after swapping (in case PHY
3374 * swapping is enabled). So when swapping is enabled, we need to reverse
3375 * the configuration
3376 */
3377
3378 if (bp->link_params.multi_phy_config &
3379 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3380 if (sel_phy_idx == EXT_PHY1)
3381 sel_phy_idx = EXT_PHY2;
3382 else if (sel_phy_idx == EXT_PHY2)
3383 sel_phy_idx = EXT_PHY1;
3384 }
3385 return LINK_CONFIG_IDX(sel_phy_idx);
3386}
3387
bf61ee14
VZ
3388#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3389int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3390{
3391 struct bnx2x *bp = netdev_priv(dev);
3392 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3393
3394 switch (type) {
3395 case NETDEV_FCOE_WWNN:
3396 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3397 cp->fcoe_wwn_node_name_lo);
3398 break;
3399 case NETDEV_FCOE_WWPN:
3400 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3401 cp->fcoe_wwn_port_name_lo);
3402 break;
3403 default:
3404 return -EINVAL;
3405 }
3406
3407 return 0;
3408}
3409#endif
3410
9f6c9258
DK
3411/* called with rtnl_lock */
3412int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3413{
3414 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
3415
3416 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
95c6c616 3417 netdev_err(dev, "Handling parity error recovery. Try again later\n");
9f6c9258
DK
3418 return -EAGAIN;
3419 }
3420
3421 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3422 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3423 return -EINVAL;
3424
3425 /* This does not race with packet allocation
3426 * because the actual alloc size is
3427 * only updated as part of load
3428 */
3429 dev->mtu = new_mtu;
3430
66371c44
MM
3431 return bnx2x_reload_if_running(dev);
3432}
3433
c8f44aff
MM
3434netdev_features_t bnx2x_fix_features(struct net_device *dev,
3435 netdev_features_t features)
66371c44
MM
3436{
3437 struct bnx2x *bp = netdev_priv(dev);
3438
3439 /* TPA requires Rx CSUM offloading */
3440 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3441 features &= ~NETIF_F_LRO;
3442
3443 return features;
3444}
3445
c8f44aff 3446int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
3447{
3448 struct bnx2x *bp = netdev_priv(dev);
3449 u32 flags = bp->flags;
538dd2e3 3450 bool bnx2x_reload = false;
66371c44
MM
3451
3452 if (features & NETIF_F_LRO)
3453 flags |= TPA_ENABLE_FLAG;
3454 else
3455 flags &= ~TPA_ENABLE_FLAG;
3456
538dd2e3
MB
3457 if (features & NETIF_F_LOOPBACK) {
3458 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3459 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3460 bnx2x_reload = true;
3461 }
3462 } else {
3463 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3464 bp->link_params.loopback_mode = LOOPBACK_NONE;
3465 bnx2x_reload = true;
3466 }
3467 }
3468
66371c44
MM
3469 if (flags ^ bp->flags) {
3470 bp->flags = flags;
538dd2e3
MB
3471 bnx2x_reload = true;
3472 }
66371c44 3473
538dd2e3 3474 if (bnx2x_reload) {
66371c44
MM
3475 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3476 return bnx2x_reload_if_running(dev);
3477 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
3478 }
3479
66371c44 3480 return 0;
9f6c9258
DK
3481}
3482
3483void bnx2x_tx_timeout(struct net_device *dev)
3484{
3485 struct bnx2x *bp = netdev_priv(dev);
3486
3487#ifdef BNX2X_STOP_ON_ERROR
3488 if (!bp->panic)
3489 bnx2x_panic();
3490#endif
7be08a72
AE
3491
3492 smp_mb__before_clear_bit();
3493 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3494 smp_mb__after_clear_bit();
3495
9f6c9258 3496 /* This allows the netif to be shut down gracefully before resetting */
7be08a72 3497 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
3498}
3499
9f6c9258
DK
3500int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3501{
3502 struct net_device *dev = pci_get_drvdata(pdev);
3503 struct bnx2x *bp;
3504
3505 if (!dev) {
3506 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3507 return -ENODEV;
3508 }
3509 bp = netdev_priv(dev);
3510
3511 rtnl_lock();
3512
3513 pci_save_state(pdev);
3514
3515 if (!netif_running(dev)) {
3516 rtnl_unlock();
3517 return 0;
3518 }
3519
3520 netif_device_detach(dev);
3521
3522 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3523
3524 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3525
3526 rtnl_unlock();
3527
3528 return 0;
3529}
3530
3531int bnx2x_resume(struct pci_dev *pdev)
3532{
3533 struct net_device *dev = pci_get_drvdata(pdev);
3534 struct bnx2x *bp;
3535 int rc;
3536
3537 if (!dev) {
3538 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3539 return -ENODEV;
3540 }
3541 bp = netdev_priv(dev);
3542
3543 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
95c6c616 3544 netdev_err(dev, "Handling parity error recovery. Try again later\n");
9f6c9258
DK
3545 return -EAGAIN;
3546 }
3547
3548 rtnl_lock();
3549
3550 pci_restore_state(pdev);
3551
3552 if (!netif_running(dev)) {
3553 rtnl_unlock();
3554 return 0;
3555 }
3556
3557 bnx2x_set_power_state(bp, PCI_D0);
3558 netif_device_attach(dev);
3559
3560 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3561
3562 rtnl_unlock();
3563
3564 return rc;
3565}
619c5cb6
VZ
3566
3567
3568void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3569 u32 cid)
3570{
3571 /* ustorm cxt validation */
3572 cxt->ustorm_ag_context.cdu_usage =
3573 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3574 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3575 /* xcontext validation */
3576 cxt->xstorm_ag_context.cdu_reserved =
3577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3578 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3579}
3580
3581static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3582 u8 fw_sb_id, u8 sb_index,
3583 u8 ticks)
3584{
3585
3586 u32 addr = BAR_CSTRORM_INTMEM +
3587 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3588 REG_WR8(bp, addr, ticks);
3589 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3590 port, fw_sb_id, sb_index, ticks);
3591}
3592
3593static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3594 u16 fw_sb_id, u8 sb_index,
3595 u8 disable)
3596{
3597 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3598 u32 addr = BAR_CSTRORM_INTMEM +
3599 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3600 u16 flags = REG_RD16(bp, addr);
3601 /* clear and set */
3602 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3603 flags |= enable_flag;
3604 REG_WR16(bp, addr, flags);
3605 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3606 port, fw_sb_id, sb_index, disable);
3607}
3608
3609void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3610 u8 sb_index, u8 disable, u16 usec)
3611{
3612 int port = BP_PORT(bp);
3613 u8 ticks = usec / BNX2X_BTR;
3614
3615 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3616
3617 disable = disable ? 1 : (usec ? 0 : 1);
3618 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3619}
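
/* Editor's illustrative sketch (not part of the driver): the coalescing
 * update above converts the requested interval from microseconds into
 * BNX2X_BTR-sized ticks and treats a zero interval as an implicit
 * "disable".  The two derivations pulled out into a hypothetical helper:
 */
static inline void bnx2x_coalesce_params_sketch(u16 usec, u8 disable,
						u8 *ticks, u8 *hc_disable)
{
	*ticks = usec / BNX2X_BTR;	/* the HW counts in BTR units */

	/* an explicit disable wins; otherwise usec == 0 also disables HC */
	*hc_disable = disable ? 1 : (usec ? 0 : 1);
}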