bnx2x: Management can control PFC/ETS
[linux-2.6-block.git] / drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2012 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/ipv6.h>
25#include <net/ip6_checksum.h>
26#include <linux/prefetch.h>
27#include "bnx2x_cmn.h"
28#include "bnx2x_init.h"
29#include "bnx2x_sp.h"
30
31
32
33/**
34 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
40 * Makes sure the contents of the bp->fp[to].napi are kept
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target. Update txdata pointers and related
44 * content.
45 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
56
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
59
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
62 to_fp->index = to;
63
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
73 */
74
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 (bp)->max_cos;
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 }
82
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
87}
88
89int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
90
91/* free skb in the packet ring at pos idx
92 * return idx of last bd freed
93 */
94static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
97{
98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
103 int nbd;
104
105 /* prefetch skb end pointer to speedup dev_kfree_skb() */
106 prefetch(&skb->end);
107
108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
109 txdata->txq_index, idx, tx_buf, skb);
110
111 /* unmap first bd */
112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
115
116
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118#ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
121 bnx2x_panic();
122 }
123#endif
124 new_cons = nbd + tx_buf->first_bd;
125
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
128
129 /* Skip a parse bd... */
130 --nbd;
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
132
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
135 --nbd;
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137 }
138
139 /* now free frags */
140 while (nbd > 0) {
141
142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
145 if (--nbd)
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
147 }
148
149 /* release skb */
150 WARN_ON(!skb);
151 if (likely(skb)) {
152 (*pkts_compl)++;
153 (*bytes_compl) += skb->len;
154 }
155
156 dev_kfree_skb_any(skb);
157 tx_buf->first_bd = 0;
158 tx_buf->skb = NULL;
159
160 return new_cons;
161}
162
163int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
164{
165 struct netdev_queue *txq;
166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
167 unsigned int pkts_compl = 0, bytes_compl = 0;
168
169#ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
171 return -1;
172#endif
173
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
177
178 while (sw_cons != hw_cons) {
179 u16 pkt_cons;
180
181 pkt_cons = TX_BD(sw_cons);
182
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
186
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
189
190 sw_cons++;
191 }
192
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
194
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
197
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped
202 * forever.
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
206 */
207 smp_mb();
208
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent reenabling the queue
211 * while it's empty. This could have happened if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
214 *
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
217 * stops the queue
218 */
219
220 __netif_tx_lock(txq, smp_processor_id());
221
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
225 netif_tx_wake_queue(txq);
226
227 __netif_tx_unlock(txq);
228 }
229 return 0;
230}
231
232static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
233 u16 idx)
234{
235 u16 last_max = fp->last_max_sge;
236
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
239}
240
241static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
242 u16 sge_len,
243 struct eth_end_agg_rx_cqe *cqe)
244{
245 struct bnx2x *bp = fp->bp;
246 u16 last_max, last_elem, first_elem;
247 u16 delta = 0;
248 u16 i;
249
250 if (!sge_len)
251 return;
252
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
257
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
260
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
263 bnx2x_update_last_max_sge(fp,
264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
265
266 last_max = RX_SGE(fp->last_max_sge);
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
269
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
272 last_elem++;
273
274 /* Now update the prod */
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
277 break;
278
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
281 }
282
283 if (delta > 0) {
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
287 }
288
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
292}
293
294/* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
296 */
297static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
298 const struct eth_fast_path_rx_cqe *cqe,
299 bool *l4_rxhash)
300{
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
305
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
309 return le32_to_cpu(cqe->rss_hash_result);
310 }
311 *l4_rxhash = false;
312 return 0;
313}
314
315static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
316 u16 cons, u16 prod,
317 struct eth_fast_path_rx_cqe *cqe)
318{
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
323 dma_addr_t mapping;
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
326
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
330
331 /* Try to map an empty data buffer from the aggregation info */
332 mapping = dma_map_single(&bp->pdev->dev,
333 first_buf->data + NET_SKB_PAD,
334 fp->rx_buf_size, DMA_FROM_DEVICE);
335 /*
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
339 */
340
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
343 bnx2x_reuse_rx_data(fp, cons, prod);
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
345 return;
346 }
347
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
351 /* point prod_bd to new data */
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
354
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
357
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
371 }
372
373#ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375#ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
377#else
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
379#endif
380 fp->tpa_queue_used);
381#endif
382}
383
384/* Timestamp option length allowed for TPA aggregation:
385 *
386 * nop nop kind length echo val
387 */
388#define TPA_TSTAMP_OPT_LEN 12
389/**
390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
391 *
392 * @bp: driver handle
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the
395 * aggregation.
396 *
397 * Approximate value of the MSS for this aggregation calculated using
398 * the first packet of it.
399 */
400static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
401 u16 len_on_bd)
402{
403 /*
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
406 */
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
408
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
412 else /* IPv4 */
413 hdrs_len += sizeof(struct iphdr);
414
415
416 /* Check if there was a TCP timestamp, if there is it will
417 * always be 12 bytes long: nop nop kind length echo val.
418 *
419 * Otherwise FW would close the aggregation.
420 */
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
423
424 return len_on_bd - hdrs_len;
425}
426
427static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
429{
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433 dma_addr_t mapping;
434
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
437 return -ENOMEM;
438 }
439
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
445 return -ENOMEM;
446 }
447
448 sw_buf->page = page;
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
450
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
453
454 return 0;
455}
456
457static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
458 struct bnx2x_agg_info *tpa_info,
459 u16 pages,
460 struct sk_buff *skb,
461 struct eth_end_agg_rx_cqe *cqe,
462 u16 cqe_idx)
463{
464 struct sw_rx_page *rx_pg, old_rx_pg;
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
467 u16 len_on_bd = tpa_info->len_on_bd;
468 u16 full_page = 0, gro_size = 0;
469
470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
471
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
475 }
476
477 /* This is needed in order to enable forwarding support */
621b4d66 478 if (frag_size) {
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
481
482 /* set for GRO */
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
489 }
490
491
492#ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
495 pages, cqe_idx);
496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
497 bnx2x_panic();
498 return -EINVAL;
499 }
500#endif
501
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
505
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
510 else /* LRO */
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
513
514 rx_pg = &fp->rx_page_ring[sge_idx];
515 old_rx_pg = *rx_pg;
516
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
520 if (unlikely(err)) {
521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
522 return err;
523 }
524
525 /* Unmap the page as we are going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
529 /* Add one frag and update the appropriate fields in the skb */
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
532 else { /* GRO */
533 int rem;
534 int offset = 0;
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
539 if (offset)
540 get_page(old_rx_pg.page);
541 offset += len;
542 }
543 }
544
545 skb->data_len += frag_len;
546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
547 skb->len += frag_len;
548
549 frag_size -= frag_len;
550 }
551
552 return 0;
553}
554
555static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info,
557 u16 pages,
558 struct eth_end_agg_rx_cqe *cqe,
559 u16 cqe_idx)
560{
561 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
562 u8 pad = tpa_info->placement_offset;
563 u16 len = tpa_info->len_on_bd;
564 struct sk_buff *skb = NULL;
565 u8 *new_data, *data = rx_buf->data;
566 u8 old_tpa_state = tpa_info->tpa_state;
567
568 tpa_info->tpa_state = BNX2X_TPA_STOP;
569
570 /* If there was an error during the handling of the TPA_START -
571 * drop this aggregation.
572 */
573 if (old_tpa_state == BNX2X_TPA_ERROR)
574 goto drop;
575
576 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
578
579 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if new skb allocation
581 fails. */
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data))
585 skb = build_skb(data, 0);
586
587 if (likely(skb)) {
588#ifdef BNX2X_STOP_ON_ERROR
589 if (pad + len > fp->rx_buf_size) {
590 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
591 pad, len, fp->rx_buf_size);
592 bnx2x_panic();
593 return;
594 }
595#endif
596
597 skb_reserve(skb, pad + NET_SKB_PAD);
598 skb_put(skb, len);
599 skb->rxhash = tpa_info->rxhash;
600 skb->l4_rxhash = tpa_info->l4_rxhash;
601
602 skb->protocol = eth_type_trans(skb, bp->dev);
603 skb->ip_summed = CHECKSUM_UNNECESSARY;
604
605 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
606 skb, cqe, cqe_idx)) {
607 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
608 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
609 napi_gro_receive(&fp->napi, skb);
610 } else {
611 DP(NETIF_MSG_RX_STATUS,
612 "Failed to allocate new pages - dropping packet!\n");
613 dev_kfree_skb_any(skb);
614 }
615
616
617 /* put new data in bin */
618 rx_buf->data = new_data;
619
620 return;
621 }
622 kfree(new_data);
623drop:
624 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS,
626 "Failed to allocate or map a new skb - dropping packet!\n");
627 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
628}
629
630static int bnx2x_alloc_rx_data(struct bnx2x *bp,
631 struct bnx2x_fastpath *fp, u16 index)
632{
633 u8 *data;
634 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
636 dma_addr_t mapping;
637
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
639 if (unlikely(data == NULL))
640 return -ENOMEM;
641
642 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
643 fp->rx_buf_size,
644 DMA_FROM_DEVICE);
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
646 kfree(data);
647 BNX2X_ERR("Can't map rx data\n");
648 return -ENOMEM;
649 }
650
651 rx_buf->data = data;
652 dma_unmap_addr_set(rx_buf, mapping, mapping);
653
654 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
655 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
656
657 return 0;
658}
659
660static
661void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
662 struct bnx2x_fastpath *fp,
663 struct bnx2x_eth_q_stats *qstats)
664{
665 /* Do nothing if no L4 csum validation was done.
666 * We do not check whether IP csum was validated. For IPv4 we assume
667 * that if the card got as far as validating the L4 csum, it also
668 * validated the IP csum. IPv6 has no IP csum.
669 */
670 if (cqe->fast_path_cqe.status_flags &
671 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
672 return;
673
674 /* If L4 validation was done, check if an error was found. */
675
676 if (cqe->fast_path_cqe.type_error_flags &
677 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
678 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
679 qstats->hw_csum_err++;
680 else
681 skb->ip_summed = CHECKSUM_UNNECESSARY;
682}
683
684int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
685{
686 struct bnx2x *bp = fp->bp;
687 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
688 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
689 int rx_pkt = 0;
690
691#ifdef BNX2X_STOP_ON_ERROR
692 if (unlikely(bp->panic))
693 return 0;
694#endif
695
696 /* CQ "next element" is of the size of the regular element,
697 that's why it's ok here */
698 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
699 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
700 hw_comp_cons++;
701
702 bd_cons = fp->rx_bd_cons;
703 bd_prod = fp->rx_bd_prod;
704 bd_prod_fw = bd_prod;
705 sw_comp_cons = fp->rx_comp_cons;
706 sw_comp_prod = fp->rx_comp_prod;
707
708 /* Memory barrier necessary as speculative reads of the rx
709 * buffer can be ahead of the index in the status block
710 */
711 rmb();
712
713 DP(NETIF_MSG_RX_STATUS,
714 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
715 fp->index, hw_comp_cons, sw_comp_cons);
716
717 while (sw_comp_cons != hw_comp_cons) {
718 struct sw_rx_bd *rx_buf = NULL;
719 struct sk_buff *skb;
720 union eth_rx_cqe *cqe;
721 struct eth_fast_path_rx_cqe *cqe_fp;
722 u8 cqe_fp_flags;
723 enum eth_rx_cqe_type cqe_fp_type;
724 u16 len, pad, queue;
725 u8 *data;
726 bool l4_rxhash;
727
728#ifdef BNX2X_STOP_ON_ERROR
729 if (unlikely(bp->panic))
730 return 0;
731#endif
732
733 comp_ring_cons = RCQ_BD(sw_comp_cons);
734 bd_prod = RX_BD(bd_prod);
735 bd_cons = RX_BD(bd_cons);
736
737 cqe = &fp->rx_comp_ring[comp_ring_cons];
738 cqe_fp = &cqe->fast_path_cqe;
739 cqe_fp_flags = cqe_fp->type_error_flags;
740 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
741
742 DP(NETIF_MSG_RX_STATUS,
743 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
744 CQE_TYPE(cqe_fp_flags),
745 cqe_fp_flags, cqe_fp->status_flags,
746 le32_to_cpu(cqe_fp->rss_hash_result),
747 le16_to_cpu(cqe_fp->vlan_tag),
748 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
749
750 /* is this a slowpath msg? */
751 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
752 bnx2x_sp_event(fp, cqe);
753 goto next_cqe;
754 }
755
756 rx_buf = &fp->rx_buf_ring[bd_cons];
757 data = rx_buf->data;
758
759 if (!CQE_TYPE_FAST(cqe_fp_type)) {
760 struct bnx2x_agg_info *tpa_info;
761 u16 frag_size, pages;
762#ifdef BNX2X_STOP_ON_ERROR
763 /* sanity check */
764 if (fp->disable_tpa &&
765 (CQE_TYPE_START(cqe_fp_type) ||
766 CQE_TYPE_STOP(cqe_fp_type)))
767 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
768 CQE_TYPE(cqe_fp_type));
769#endif
770
771 if (CQE_TYPE_START(cqe_fp_type)) {
772 u16 queue = cqe_fp->queue_index;
773 DP(NETIF_MSG_RX_STATUS,
774 "calling tpa_start on queue %d\n",
775 queue);
776
777 bnx2x_tpa_start(fp, queue,
778 bd_cons, bd_prod,
779 cqe_fp);
780
781 goto next_rx;
782
783 }
784 queue = cqe->end_agg_cqe.queue_index;
785 tpa_info = &fp->tpa_info[queue];
786 DP(NETIF_MSG_RX_STATUS,
787 "calling tpa_stop on queue %d\n",
788 queue);
789
790 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
791 tpa_info->len_on_bd;
792
793 if (fp->mode == TPA_MODE_GRO)
794 pages = (frag_size + tpa_info->full_page - 1) /
795 tpa_info->full_page;
796 else
797 pages = SGE_PAGE_ALIGN(frag_size) >>
798 SGE_PAGE_SHIFT;
799
800 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
801 &cqe->end_agg_cqe, comp_ring_cons);
802#ifdef BNX2X_STOP_ON_ERROR
803 if (bp->panic)
804 return 0;
805#endif
806
807 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
808 goto next_cqe;
809 }
810 /* non TPA */
811 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
812 pad = cqe_fp->placement_offset;
813 dma_sync_single_for_cpu(&bp->pdev->dev,
814 dma_unmap_addr(rx_buf, mapping),
815 pad + RX_COPY_THRESH,
816 DMA_FROM_DEVICE);
817 pad += NET_SKB_PAD;
818 prefetch(data + pad); /* speedup eth_type_trans() */
819 /* is this an error packet? */
820 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
821 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
822 "ERROR flags %x rx packet %u\n",
823 cqe_fp_flags, sw_comp_cons);
824 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
825 goto reuse_rx;
826 }
827
828 /* Since we don't have a jumbo ring
829 * copy small packets if mtu > 1500
830 */
831 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
832 (len <= RX_COPY_THRESH)) {
833 skb = netdev_alloc_skb_ip_align(bp->dev, len);
834 if (skb == NULL) {
835 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
836 "ERROR packet dropped because of alloc failure\n");
837 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
838 goto reuse_rx;
839 }
840 memcpy(skb->data, data + pad, len);
841 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
842 } else {
843 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
844 dma_unmap_single(&bp->pdev->dev,
845 dma_unmap_addr(rx_buf, mapping),
846 fp->rx_buf_size,
847 DMA_FROM_DEVICE);
848 skb = build_skb(data, 0);
849 if (unlikely(!skb)) {
850 kfree(data);
851 bnx2x_fp_qstats(bp, fp)->
852 rx_skb_alloc_failed++;
853 goto next_rx;
854 }
855 skb_reserve(skb, pad);
856 } else {
857 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
858 "ERROR packet dropped because of alloc failure\n");
859 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
860reuse_rx:
861 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
862 goto next_rx;
863 }
864 }
865
866 skb_put(skb, len);
867 skb->protocol = eth_type_trans(skb, bp->dev);
868
869 /* Set Toeplitz hash for a non-LRO skb */
870 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
871 skb->l4_rxhash = l4_rxhash;
872
873 skb_checksum_none_assert(skb);
874
875 if (bp->dev->features & NETIF_F_RXCSUM)
876 bnx2x_csum_validate(skb, cqe, fp,
877 bnx2x_fp_qstats(bp, fp));
878
879 skb_record_rx_queue(skb, fp->rx_queue);
880
881 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
882 PARSING_FLAGS_VLAN)
883 __vlan_hwaccel_put_tag(skb,
884 le16_to_cpu(cqe_fp->vlan_tag));
885 napi_gro_receive(&fp->napi, skb);
886
887
888next_rx:
889 rx_buf->data = NULL;
890
891 bd_cons = NEXT_RX_IDX(bd_cons);
892 bd_prod = NEXT_RX_IDX(bd_prod);
893 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
894 rx_pkt++;
895next_cqe:
896 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
897 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
898
899 if (rx_pkt == budget)
900 break;
901 } /* while */
902
903 fp->rx_bd_cons = bd_cons;
904 fp->rx_bd_prod = bd_prod_fw;
905 fp->rx_comp_cons = sw_comp_cons;
906 fp->rx_comp_prod = sw_comp_prod;
907
908 /* Update producers */
909 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
910 fp->rx_sge_prod);
911
912 fp->rx_pkt += rx_pkt;
913 fp->rx_calls++;
914
915 return rx_pkt;
916}
917
918static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
919{
920 struct bnx2x_fastpath *fp = fp_cookie;
921 struct bnx2x *bp = fp->bp;
922 u8 cos;
923
924 DP(NETIF_MSG_INTR,
925 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
926 fp->index, fp->fw_sb_id, fp->igu_sb_id);
927 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
928
929#ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
931 return IRQ_HANDLED;
932#endif
933
934 /* Handle Rx and Tx according to MSI-X vector */
935 prefetch(fp->rx_cons_sb);
936
937 for_each_cos_in_tx_queue(fp, cos)
938 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
939
940 prefetch(&fp->sb_running_index[SM_RX_ID]);
941 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
942
943 return IRQ_HANDLED;
944}
945
946/* HW Lock for shared dual port PHYs */
947void bnx2x_acquire_phy_lock(struct bnx2x *bp)
948{
949 mutex_lock(&bp->port.phy_mutex);
950
951 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
952}
953
954void bnx2x_release_phy_lock(struct bnx2x *bp)
955{
956 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
957
958 mutex_unlock(&bp->port.phy_mutex);
959}
960
961/* calculates MF speed according to current linespeed and MF configuration */
962u16 bnx2x_get_mf_speed(struct bnx2x *bp)
963{
964 u16 line_speed = bp->link_vars.line_speed;
965 if (IS_MF(bp)) {
966 u16 maxCfg = bnx2x_extract_max_cfg(bp,
967 bp->mf_config[BP_VN(bp)]);
968
969 /* Calculate the current MAX line speed limit for the MF
970 * devices
971 */
972 if (IS_MF_SI(bp))
973 line_speed = (line_speed * maxCfg) / 100;
974 else { /* SD mode */
975 u16 vn_max_rate = maxCfg * 100;
976
977 if (vn_max_rate < line_speed)
978 line_speed = vn_max_rate;
979 }
980 }
981
982 return line_speed;
983}
984
985/**
986 * bnx2x_fill_report_data - fill link report data to report
987 *
988 * @bp: driver handle
989 * @data: link state to update
990 *
991 * It uses non-atomic bit operations because it is called under the mutex.
992 */
993static void bnx2x_fill_report_data(struct bnx2x *bp,
994 struct bnx2x_link_report_data *data)
995{
996 u16 line_speed = bnx2x_get_mf_speed(bp);
997
998 memset(data, 0, sizeof(*data));
999
1000 /* Fill the report data: effective line speed */
1001 data->line_speed = line_speed;
1002
1003 /* Link is down */
1004 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1005 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1006 &data->link_report_flags);
1007
1008 /* Full DUPLEX */
1009 if (bp->link_vars.duplex == DUPLEX_FULL)
1010 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1011
1012 /* Rx Flow Control is ON */
1013 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1014 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1015
1016 /* Tx Flow Control is ON */
1017 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1018 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1019}
1020
1021/**
1022 * bnx2x_link_report - report link status to OS.
1023 *
1024 * @bp: driver handle
1025 *
1026 * Calls the __bnx2x_link_report() under the same locking scheme
1027 * as a link/PHY state managing code to ensure a consistent link
1028 * reporting.
1029 */
1030
1031void bnx2x_link_report(struct bnx2x *bp)
1032{
1033 bnx2x_acquire_phy_lock(bp);
1034 __bnx2x_link_report(bp);
1035 bnx2x_release_phy_lock(bp);
1036}
1037
1038/**
1039 * __bnx2x_link_report - report link status to OS.
1040 *
1041 * @bp: driver handle
1042 *
1043 * Non-atomic implementation.
1044 * Should be called under the phy_lock.
1045 */
1046void __bnx2x_link_report(struct bnx2x *bp)
1047{
1048 struct bnx2x_link_report_data cur_data;
1049
1050 /* reread mf_cfg */
1051 if (!CHIP_IS_E1(bp))
1052 bnx2x_read_mf_cfg(bp);
1053
1054 /* Read the current link report info */
1055 bnx2x_fill_report_data(bp, &cur_data);
1056
1057 /* Don't report link down or exactly the same link status twice */
1058 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1059 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1060 &bp->last_reported_link.link_report_flags) &&
1061 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1062 &cur_data.link_report_flags)))
1063 return;
1064
1065 bp->link_cnt++;
1066
1067 /* We are going to report a new link parameters now -
1068 * remember the current data for the next time.
1069 */
1070 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1071
1072 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1073 &cur_data.link_report_flags)) {
1074 netif_carrier_off(bp->dev);
1075 netdev_err(bp->dev, "NIC Link is Down\n");
1076 return;
1077 } else {
1078 const char *duplex;
1079 const char *flow;
1080
1081 netif_carrier_on(bp->dev);
1082
1083 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1084 &cur_data.link_report_flags))
1085 duplex = "full";
1086 else
1087 duplex = "half";
1088
1089 /* Handle the FC at the end so that only these flags would be
1090 * possibly set. This way we may easily check if there is no FC
1091 * enabled.
1092 */
1093 if (cur_data.link_report_flags) {
1094 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1095 &cur_data.link_report_flags)) {
1096 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1097 &cur_data.link_report_flags))
1098 flow = "ON - receive & transmit";
1099 else
1100 flow = "ON - receive";
1101 } else {
1102 flow = "ON - transmit";
1103 }
1104 } else {
1105 flow = "none";
1106 }
1107 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1108 cur_data.line_speed, duplex, flow);
1109 }
1110}
1111
1112static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1113{
1114 int i;
1115
1116 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1117 struct eth_rx_sge *sge;
1118
1119 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1120 sge->addr_hi =
1121 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1122 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1123
1124 sge->addr_lo =
1125 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1126 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1127 }
1128}
1129
1130static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1131 struct bnx2x_fastpath *fp, int last)
1132{
1133 int i;
1134
1135 for (i = 0; i < last; i++) {
1136 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1137 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1138 u8 *data = first_buf->data;
1139
1140 if (data == NULL) {
1141 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1142 continue;
1143 }
1144 if (tpa_info->tpa_state == BNX2X_TPA_START)
1145 dma_unmap_single(&bp->pdev->dev,
1146 dma_unmap_addr(first_buf, mapping),
1147 fp->rx_buf_size, DMA_FROM_DEVICE);
1148 kfree(data);
1149 first_buf->data = NULL;
1150 }
1151}
1152
1153void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1154{
1155 int j;
1156
1157 for_each_rx_queue_cnic(bp, j) {
1158 struct bnx2x_fastpath *fp = &bp->fp[j];
1159
1160 fp->rx_bd_cons = 0;
1161
1162 /* Activate BD ring */
1163 /* Warning!
1164 * this will generate an interrupt (to the TSTORM)
1165 * must only be done after chip is initialized
1166 */
1167 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1168 fp->rx_sge_prod);
1169 }
1170}
1171
1172void bnx2x_init_rx_rings(struct bnx2x *bp)
1173{
1174 int func = BP_FUNC(bp);
1175 u16 ring_prod;
1176 int i, j;
1177
1178 /* Allocate TPA resources */
1179 for_each_eth_queue(bp, j) {
1180 struct bnx2x_fastpath *fp = &bp->fp[j];
1181
1182 DP(NETIF_MSG_IFUP,
1183 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1184
1185 if (!fp->disable_tpa) {
1186 /* Fill the per-aggregation pool */
1187 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1188 struct bnx2x_agg_info *tpa_info =
1189 &fp->tpa_info[i];
1190 struct sw_rx_bd *first_buf =
1191 &tpa_info->first_buf;
1192
1193 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1194 GFP_ATOMIC);
1195 if (!first_buf->data) {
1196 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1197 j);
1198 bnx2x_free_tpa_pool(bp, fp, i);
1199 fp->disable_tpa = 1;
1200 break;
1201 }
1202 dma_unmap_addr_set(first_buf, mapping, 0);
1203 tpa_info->tpa_state = BNX2X_TPA_STOP;
1204 }
1205
1206 /* "next page" elements initialization */
1207 bnx2x_set_next_page_sgl(fp);
1208
1209 /* set SGEs bit mask */
1210 bnx2x_init_sge_ring_bit_mask(fp);
1211
1212 /* Allocate SGEs and initialize the ring elements */
1213 for (i = 0, ring_prod = 0;
1214 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1215
1216 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1217 BNX2X_ERR("was only able to allocate %d rx sges\n",
1218 i);
1219 BNX2X_ERR("disabling TPA for queue[%d]\n",
1220 j);
1221 /* Cleanup already allocated elements */
1222 bnx2x_free_rx_sge_range(bp, fp,
1223 ring_prod);
1224 bnx2x_free_tpa_pool(bp, fp,
1225 MAX_AGG_QS(bp));
1226 fp->disable_tpa = 1;
1227 ring_prod = 0;
1228 break;
1229 }
1230 ring_prod = NEXT_SGE_IDX(ring_prod);
1231 }
1232
1233 fp->rx_sge_prod = ring_prod;
1234 }
1235 }
1236
1237 for_each_eth_queue(bp, j) {
1238 struct bnx2x_fastpath *fp = &bp->fp[j];
1239
1240 fp->rx_bd_cons = 0;
1241
1242 /* Activate BD ring */
1243 /* Warning!
1244 * this will generate an interrupt (to the TSTORM)
1245 * must only be done after chip is initialized
1246 */
1247 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1248 fp->rx_sge_prod);
1249
1250 if (j != 0)
1251 continue;
1252
1253 if (CHIP_IS_E1(bp)) {
1254 REG_WR(bp, BAR_USTRORM_INTMEM +
1255 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1256 U64_LO(fp->rx_comp_mapping));
1257 REG_WR(bp, BAR_USTRORM_INTMEM +
1258 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1259 U64_HI(fp->rx_comp_mapping));
1260 }
1261 }
1262}
1263
1264static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1265{
1266 u8 cos;
1267 struct bnx2x *bp = fp->bp;
1268
1269 for_each_cos_in_tx_queue(fp, cos) {
1270 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1271 unsigned pkts_compl = 0, bytes_compl = 0;
1272
1273 u16 sw_prod = txdata->tx_pkt_prod;
1274 u16 sw_cons = txdata->tx_pkt_cons;
1275
1276 while (sw_cons != sw_prod) {
1277 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1278 &pkts_compl, &bytes_compl);
1279 sw_cons++;
1280 }
1281
1282 netdev_tx_reset_queue(
1283 netdev_get_tx_queue(bp->dev,
1284 txdata->txq_index));
1285 }
1286}
1287
1288static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1289{
1290 int i;
1291
1292 for_each_tx_queue_cnic(bp, i) {
1293 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1294 }
1295}
1296
1297static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1298{
1299 int i;
1300
1301 for_each_eth_queue(bp, i) {
1302 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1303 }
1304}
1305
1306static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1307{
1308 struct bnx2x *bp = fp->bp;
1309 int i;
1310
1311 /* ring wasn't allocated */
1312 if (fp->rx_buf_ring == NULL)
1313 return;
1314
1315 for (i = 0; i < NUM_RX_BD; i++) {
1316 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1317 u8 *data = rx_buf->data;
1318
1319 if (data == NULL)
1320 continue;
1321 dma_unmap_single(&bp->pdev->dev,
1322 dma_unmap_addr(rx_buf, mapping),
1323 fp->rx_buf_size, DMA_FROM_DEVICE);
1324
1325 rx_buf->data = NULL;
1326 kfree(data);
1327 }
1328}
1329
1330static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1331{
1332 int j;
1333
1334 for_each_rx_queue_cnic(bp, j) {
1335 bnx2x_free_rx_bds(&bp->fp[j]);
1336 }
1337}
1338
1339static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1340{
1341 int j;
1342
1343 for_each_eth_queue(bp, j) {
1344 struct bnx2x_fastpath *fp = &bp->fp[j];
1345
1346 bnx2x_free_rx_bds(fp);
1347
1348 if (!fp->disable_tpa)
1349 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1350 }
1351}
1352
1353void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1354{
1355 bnx2x_free_tx_skbs_cnic(bp);
1356 bnx2x_free_rx_skbs_cnic(bp);
1357}
1358
1359void bnx2x_free_skbs(struct bnx2x *bp)
1360{
1361 bnx2x_free_tx_skbs(bp);
1362 bnx2x_free_rx_skbs(bp);
1363}
1364
1365void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1366{
1367 /* load old values */
1368 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1369
1370 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1371 /* leave all but MAX value */
1372 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1373
1374 /* set new MAX value */
1375 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1376 & FUNC_MF_CFG_MAX_BW_MASK;
1377
1378 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1379 }
1380}
1381
1382/**
1383 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1384 *
1385 * @bp: driver handle
1386 * @nvecs: number of vectors to be released
1387 */
1388static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1389{
1390 int i, offset = 0;
1391
1392 if (nvecs == offset)
1393 return;
1394 free_irq(bp->msix_table[offset].vector, bp->dev);
1395 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1396 bp->msix_table[offset].vector);
1397 offset++;
1398
1399 if (CNIC_SUPPORT(bp)) {
1400 if (nvecs == offset)
1401 return;
1402 offset++;
1403 }
1404
1405 for_each_eth_queue(bp, i) {
1406 if (nvecs == offset)
1407 return;
1408 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1409 i, bp->msix_table[offset].vector);
1410
1411 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1412 }
1413}
1414
1415void bnx2x_free_irq(struct bnx2x *bp)
1416{
1417 if (bp->flags & USING_MSIX_FLAG &&
1418 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1419 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1420 CNIC_SUPPORT(bp) + 1);
1421 else
1422 free_irq(bp->dev->irq, bp->dev);
1423}
1424
1425int bnx2x_enable_msix(struct bnx2x *bp)
1426{
1427 int msix_vec = 0, i, rc, req_cnt;
1428
1429 bp->msix_table[msix_vec].entry = msix_vec;
1430 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1431 bp->msix_table[0].entry);
1432 msix_vec++;
1433
1434 /* Cnic requires an msix vector for itself */
1435 if (CNIC_SUPPORT(bp)) {
1436 bp->msix_table[msix_vec].entry = msix_vec;
1437 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1438 msix_vec, bp->msix_table[msix_vec].entry);
1439 msix_vec++;
1440 }
1441
1442 /* We need separate vectors for ETH queues only (not FCoE) */
1443 for_each_eth_queue(bp, i) {
1444 bp->msix_table[msix_vec].entry = msix_vec;
1445 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1446 msix_vec, msix_vec, i);
1447 msix_vec++;
1448 }
1449
1450 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1451
1452 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1453
1454 /*
1455 * reconfigure number of tx/rx queues according to available
1456 * MSI-X vectors
1457 */
1458 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1459 /* how less vectors we will have? */
1460 int diff = req_cnt - rc;
1461
1462 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1463
1464 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1465
1466 if (rc) {
1467 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1468 goto no_msix;
1469 }
1470 /*
1471 * decrease number of queues by number of unallocated entries
1472 */
1473 bp->num_ethernet_queues -= diff;
1474 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1475
1476 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1477 bp->num_queues);
1478 } else if (rc > 0) {
1479 /* Get by with single vector */
1480 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1481 if (rc) {
1482 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1483 rc);
1484 goto no_msix;
1485 }
1486
1487 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1488 bp->flags |= USING_SINGLE_MSIX_FLAG;
1489
1490 BNX2X_DEV_INFO("set number of queues to 1\n");
1491 bp->num_ethernet_queues = 1;
1492 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1493 } else if (rc < 0) {
1494 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1495 goto no_msix;
1496 }
1497
1498 bp->flags |= USING_MSIX_FLAG;
1499
1500 return 0;
1501
1502no_msix:
1503 /* fall to INTx if not enough memory */
1504 if (rc == -ENOMEM)
1505 bp->flags |= DISABLE_MSI_FLAG;
1506
1507 return rc;
1508}
1509
1510static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1511{
1512 int i, rc, offset = 0;
1513
1514 rc = request_irq(bp->msix_table[offset++].vector,
1515 bnx2x_msix_sp_int, 0,
1516 bp->dev->name, bp->dev);
1517 if (rc) {
1518 BNX2X_ERR("request sp irq failed\n");
1519 return -EBUSY;
1520 }
1521
1522 if (CNIC_SUPPORT(bp))
1523 offset++;
1524
1525 for_each_eth_queue(bp, i) {
1526 struct bnx2x_fastpath *fp = &bp->fp[i];
1527 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1528 bp->dev->name, i);
1529
1530 rc = request_irq(bp->msix_table[offset].vector,
1531 bnx2x_msix_fp_int, 0, fp->name, fp);
1532 if (rc) {
1533 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1534 bp->msix_table[offset].vector, rc);
1535 bnx2x_free_msix_irqs(bp, offset);
1536 return -EBUSY;
1537 }
1538
1539 offset++;
1540 }
1541
1542 i = BNX2X_NUM_ETH_QUEUES(bp);
1543 offset = 1 + CNIC_SUPPORT(bp);
1544 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1545 bp->msix_table[0].vector,
1546 0, bp->msix_table[offset].vector,
1547 i - 1, bp->msix_table[offset + i - 1].vector);
1548
1549 return 0;
1550}
1551
1552int bnx2x_enable_msi(struct bnx2x *bp)
1553{
1554 int rc;
1555
1556 rc = pci_enable_msi(bp->pdev);
1557 if (rc) {
1558 BNX2X_DEV_INFO("MSI is not attainable\n");
1559 return -1;
1560 }
1561 bp->flags |= USING_MSI_FLAG;
1562
1563 return 0;
1564}
1565
1566static int bnx2x_req_irq(struct bnx2x *bp)
1567{
1568 unsigned long flags;
1569 unsigned int irq;
1570
1571 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1572 flags = 0;
1573 else
1574 flags = IRQF_SHARED;
1575
1576 if (bp->flags & USING_MSIX_FLAG)
1577 irq = bp->msix_table[0].vector;
1578 else
1579 irq = bp->pdev->irq;
1580
1581 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1582}
1583
1584static int bnx2x_setup_irqs(struct bnx2x *bp)
1585{
1586 int rc = 0;
1587 if (bp->flags & USING_MSIX_FLAG &&
1588 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1589 rc = bnx2x_req_msix_irqs(bp);
1590 if (rc)
1591 return rc;
1592 } else {
1593 bnx2x_ack_int(bp);
1594 rc = bnx2x_req_irq(bp);
1595 if (rc) {
1596 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1597 return rc;
1598 }
1599 if (bp->flags & USING_MSI_FLAG) {
1600 bp->dev->irq = bp->pdev->irq;
1601 netdev_info(bp->dev, "using MSI IRQ %d\n",
1602 bp->dev->irq);
1603 }
1604 if (bp->flags & USING_MSIX_FLAG) {
1605 bp->dev->irq = bp->msix_table[0].vector;
1606 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1607 bp->dev->irq);
1608 }
1609 }
1610
1611 return 0;
1612}
1613
1614static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1615{
1616 int i;
1617
1618 for_each_rx_queue_cnic(bp, i)
1619 napi_enable(&bnx2x_fp(bp, i, napi));
1620}
1621
1622static void bnx2x_napi_enable(struct bnx2x *bp)
1623{
1624 int i;
1625
1626 for_each_eth_queue(bp, i)
1627 napi_enable(&bnx2x_fp(bp, i, napi));
1628}
1629
1630static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1631{
1632 int i;
1633
1634 for_each_rx_queue_cnic(bp, i)
1635 napi_disable(&bnx2x_fp(bp, i, napi));
1636}
1637
1638static void bnx2x_napi_disable(struct bnx2x *bp)
1639{
1640 int i;
1641
1642 for_each_eth_queue(bp, i)
1643 napi_disable(&bnx2x_fp(bp, i, napi));
1644}
1645
1646void bnx2x_netif_start(struct bnx2x *bp)
1647{
1648 if (netif_running(bp->dev)) {
1649 bnx2x_napi_enable(bp);
1650 if (CNIC_LOADED(bp))
1651 bnx2x_napi_enable_cnic(bp);
1652 bnx2x_int_enable(bp);
1653 if (bp->state == BNX2X_STATE_OPEN)
1654 netif_tx_wake_all_queues(bp->dev);
1655 }
1656}
1657
1658void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1659{
1660 bnx2x_int_disable_sync(bp, disable_hw);
1661 bnx2x_napi_disable(bp);
1662 if (CNIC_LOADED(bp))
1663 bnx2x_napi_disable_cnic(bp);
1664}
1665
1666u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1667{
1668 struct bnx2x *bp = netdev_priv(dev);
1669
1670 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1671 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1672 u16 ether_type = ntohs(hdr->h_proto);
1673
1674 /* Skip VLAN tag if present */
1675 if (ether_type == ETH_P_8021Q) {
1676 struct vlan_ethhdr *vhdr =
1677 (struct vlan_ethhdr *)skb->data;
1678
1679 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1680 }
1681
1682 /* If ethertype is FCoE or FIP - use FCoE ring */
1683 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1684 return bnx2x_fcoe_tx(bp, txq_index);
1685 }
1686
1687 /* select a non-FCoE queue */
1688 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1689}
1690
1691
1692void bnx2x_set_num_queues(struct bnx2x *bp)
1693{
1694 /* RSS queues */
1695 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1696
1697 /* override in STORAGE SD modes */
1698 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1699 bp->num_ethernet_queues = 1;
1700
1701 /* Add special queues */
1702 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1703 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1704
1705 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1706}
1707
1708/**
1709 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1710 *
1711 * @bp: Driver handle
1712 *
1713 * We currently support for at most 16 Tx queues for each CoS thus we will
1714 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1715 * bp->max_cos.
1716 *
1717 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1718 * index after all ETH L2 indices.
1719 *
1720 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1721 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1722 * 16..31,...) with indices that are not coupled with any real Tx queue.
1723 *
1724 * The proper configuration of skb->queue_mapping is handled by
1725 * bnx2x_select_queue() and __skb_tx_hash().
1726 *
1727 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1728 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1729 */
1730static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1731{
1732 int rc, tx, rx;
1733
1734 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1735 rx = BNX2X_NUM_ETH_QUEUES(bp);
1736
1737/* account for fcoe queue */
1738 if (include_cnic && !NO_FCOE(bp)) {
1739 rx++;
1740 tx++;
1741 }
1742
1743 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1744 if (rc) {
1745 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1746 return rc;
1747 }
1748 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1749 if (rc) {
1750 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1751 return rc;
1752 }
1753
1754 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1755 tx, rx);
1756
1757 return rc;
1758}
1759
1760static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1761{
1762 int i;
1763
1764 for_each_queue(bp, i) {
1765 struct bnx2x_fastpath *fp = &bp->fp[i];
1766 u32 mtu;
1767
1768 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1769 if (IS_FCOE_IDX(i))
1770 /*
1771 * Although there are no IP frames expected to arrive to
1772 * this ring we still want to add an
1773 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1774 * overrun attack.
1775 */
1776 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1777 else
1778 mtu = bp->dev->mtu;
1779 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1780 IP_HEADER_ALIGNMENT_PADDING +
1781 ETH_OVREHEAD +
1782 mtu +
1783 BNX2X_FW_RX_ALIGN_END;
1784 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
a8c94b91
VZ
1785 }
1786}
1787
1191cb83 1788static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1789{
1790 int i;
619c5cb6
VZ
1791 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1792
96305234 1793 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1794 * enabled
1795 */
5d317c6a
MS
1796 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1797 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1798 bp->fp->cl_id +
1799 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1800
1801 /*
1802 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1803 * per-port, so if explicit configuration is needed, do it only
1804 * for a PMF.
1805 *
1806 * For 57712 and newer on the other hand it's a per-function
1807 * configuration.
1808 */
5d317c6a 1809 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1810}
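/* Sketch of the default spread programmed above, under the assumption
 * that ethtool_rxfh_indir_default(i, n) reduces to i % n, and with made-up
 * values: given 4 ETH queues and bp->fp->cl_id == 16, the indirection
 * table becomes 16, 17, 18, 19, 16, 17, ... so the RSS hash cycles evenly
 * over the client IDs of the ETH rings.
 */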
1811
96305234 1812int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1813 bool config_hash)
619c5cb6 1814{
3b603066 1815 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1816 int i;
1817
1818 /* Although RSS is meaningless when there is a single HW queue, we
1819 * still need it enabled in order to have HW Rx hash generated.
1820 *
1821 * if (!is_eth_multi(bp))
1822 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1823 */
1824
96305234 1825 params.rss_obj = rss_obj;
619c5cb6
VZ
1826
1827 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1828
96305234 1829 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1830
96305234
DK
1831 /* RSS configuration */
1832 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1833 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1834 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1835 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1836 if (rss_obj->udp_rss_v4)
1837 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1838 if (rss_obj->udp_rss_v6)
1839 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1840
96305234
DK
1841 /* Hash bits */
1842 params.rss_result_mask = MULTI_MASK;
619c5cb6 1843
5d317c6a 1844 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1845
96305234
DK
1846 if (config_hash) {
1847 /* RSS keys */
1848 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1849 params.rss_key[i] = random32();
619c5cb6 1850
96305234 1851 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1852 }
1853
1854 return bnx2x_config_rss(bp, &params);
1855}
1856
1191cb83 1857static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1858{
3b603066 1859 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1860
1861 /* Prepare parameters for function state transitions */
1862 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1863
1864 func_params.f_obj = &bp->func_obj;
1865 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1866
1867 func_params.params.hw_init.load_phase = load_code;
1868
1869 return bnx2x_func_state_change(bp, &func_params);
1870}
1871
1872/*
1873 * Cleans the objects that have internal lists, without sending
1874 * ramrods. Should be run when interrupts are disabled.
1875 */
1876static void bnx2x_squeeze_objects(struct bnx2x *bp)
1877{
1878 int rc;
1879 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 1880 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 1881 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
1882
1883 /***************** Cleanup MACs' object first *************************/
1884
1885 /* Wait for completion of the requested commands */
1886 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1887 /* Perform a dry cleanup */
1888 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1889
1890 /* Clean ETH primary MAC */
1891 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 1892 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
1893 &ramrod_flags);
1894 if (rc != 0)
1895 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1896
1897 /* Cleanup UC list */
1898 vlan_mac_flags = 0;
1899 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1900 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1901 &ramrod_flags);
1902 if (rc != 0)
1903 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1904
1905 /***************** Now clean mcast object *****************************/
1906 rparam.mcast_obj = &bp->mcast_obj;
1907 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1908
1909 /* Add a DEL command... */
1910 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1911 if (rc < 0)
51c1a580
MS
1912 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1913 rc);
619c5cb6
VZ
1914
1915 /* ...and wait until all pending commands are cleared */
1916 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1917 while (rc != 0) {
1918 if (rc < 0) {
1919 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1920 rc);
1921 return;
1922 }
1923
1924 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1925 }
1926}
1927
1928#ifndef BNX2X_STOP_ON_ERROR
1929#define LOAD_ERROR_EXIT(bp, label) \
1930 do { \
1931 (bp)->state = BNX2X_STATE_ERROR; \
1932 goto label; \
1933 } while (0)
55c11941
MS
1934
1935#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1936 do { \
1937 bp->cnic_loaded = false; \
1938 goto label; \
1939 } while (0)
1940#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
1941#define LOAD_ERROR_EXIT(bp, label) \
1942 do { \
1943 (bp)->state = BNX2X_STATE_ERROR; \
1944 (bp)->panic = 1; \
1945 return -EBUSY; \
1946 } while (0)
55c11941
MS
1947#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1948 do { \
1949 bp->cnic_loaded = false; \
1950 (bp)->panic = 1; \
1951 return -EBUSY; \
1952 } while (0)
1953#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 1954
452427b0
YM
1955bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1956{
1957 /* build FW version dword */
1958 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1959 (BCM_5710_FW_MINOR_VERSION << 8) +
1960 (BCM_5710_FW_REVISION_VERSION << 16) +
1961 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1962
1963 /* read loaded FW from chip */
1964 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1965
1966 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1967
1968 if (loaded_fw != my_fw) {
1969 if (is_err)
1970 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1971 loaded_fw, my_fw);
1972 return false;
1973 }
1974
1975 return true;
1976}
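/* A minimal sketch of the dword layout compared above; the helper name
 * and the version numbers are made up for illustration and are not part
 * of the driver.
 */
static inline u32 example_fw_ver_dword(u32 major, u32 minor, u32 rev, u32 eng)
{
	/* e.g. 7.8.17.0 packs as 7 + (8 << 8) + (17 << 16) == 0x00110807 */
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}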
1977
1191cb83
ED
1978/**
1979 * bnx2x_bz_fp - zero content of the fastpath structure.
1980 *
1981 * @bp: driver handle
1982 * @index: fastpath index to be zeroed
1983 *
1984 * Makes sure the contents of the bp->fp[index].napi is kept
1985 * intact.
1986 */
1987static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1988{
1989 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c
BW
1990 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
1991
65565884 1992 int cos;
1191cb83 1993 struct napi_struct orig_napi = fp->napi;
15192a8c 1994 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 1995 /* bzero bnx2x_fastpath contents */
15192a8c
BW
1996 if (bp->stats_init) {
1997 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83 1998 memset(fp, 0, sizeof(*fp));
15192a8c 1999 } else {
1191cb83
ED
2000 /* Keep Queue statistics */
2001 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2002 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2003
2004 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2005 GFP_KERNEL);
2006 if (tmp_eth_q_stats)
15192a8c 2007 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1191cb83
ED
2008 sizeof(struct bnx2x_eth_q_stats));
2009
2010 tmp_eth_q_stats_old =
2011 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2012 GFP_KERNEL);
2013 if (tmp_eth_q_stats_old)
15192a8c 2014 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1191cb83
ED
2015 sizeof(struct bnx2x_eth_q_stats_old));
2016
15192a8c 2017 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83
ED
2018 memset(fp, 0, sizeof(*fp));
2019
2020 if (tmp_eth_q_stats) {
15192a8c
BW
2021 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2022 sizeof(struct bnx2x_eth_q_stats));
1191cb83
ED
2023 kfree(tmp_eth_q_stats);
2024 }
2025
2026 if (tmp_eth_q_stats_old) {
15192a8c 2027 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1191cb83
ED
2028 sizeof(struct bnx2x_eth_q_stats_old));
2029 kfree(tmp_eth_q_stats_old);
2030 }
2031
2032 }
2033
2034 /* Restore the NAPI object as it has been already initialized */
2035 fp->napi = orig_napi;
15192a8c 2036 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2037 fp->bp = bp;
2038 fp->index = index;
2039 if (IS_ETH_FP(fp))
2040 fp->max_cos = bp->max_cos;
2041 else
2042 /* Special queues support only one CoS */
2043 fp->max_cos = 1;
2044
65565884 2045 /* Init txdata pointers */
65565884
MS
2046 if (IS_FCOE_FP(fp))
2047 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2048 if (IS_ETH_FP(fp))
2049 for_each_cos_in_tx_queue(fp, cos)
2050 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2051 BNX2X_NUM_ETH_QUEUES(bp) + index];
2052
1191cb83
ED
2053 /*
2054 * set the TPA flag for each queue. The TPA flag determines the minimal
2055 * queue size, so it must be set prior to queue memory allocation
2056 */
2057 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2058 (bp->flags & GRO_ENABLE_FLAG &&
2059 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2060 if (bp->flags & TPA_ENABLE_FLAG)
2061 fp->mode = TPA_MODE_LRO;
2062 else if (bp->flags & GRO_ENABLE_FLAG)
2063 fp->mode = TPA_MODE_GRO;
2064
1191cb83
ED
2065 /* We don't want TPA on an FCoE L2 ring */
2066 if (IS_FCOE_FP(fp))
2067 fp->disable_tpa = 1;
55c11941
MS
2068}
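/* Txdata indexing illustrated with assumed values (not driver state):
 * with 4 ETH queues, ETH fastpath index 2 at CoS 1 is wired to
 * bp->bnx2x_txq[1 * 4 + 2], i.e. slot 6, while the FCoE fastpath always
 * takes the single slot at FCOE_TXQ_IDX(bp) past all ETH txdata entries.
 */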
2069
2070int bnx2x_load_cnic(struct bnx2x *bp)
2071{
2072 int i, rc, port = BP_PORT(bp);
2073
2074 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2075
2076 mutex_init(&bp->cnic_mutex);
2077
2078 rc = bnx2x_alloc_mem_cnic(bp);
2079 if (rc) {
2080 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2081 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2082 }
2083
2084 rc = bnx2x_alloc_fp_mem_cnic(bp);
2085 if (rc) {
2086 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2087 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2088 }
2089
2090 /* Update the number of queues with the cnic queues */
2091 rc = bnx2x_set_real_num_queues(bp, 1);
2092 if (rc) {
2093 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2094 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2095 }
2096
2097 /* Add all CNIC NAPI objects */
2098 bnx2x_add_all_napi_cnic(bp);
2099 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2100 bnx2x_napi_enable_cnic(bp);
2101
2102 rc = bnx2x_init_hw_func_cnic(bp);
2103 if (rc)
2104 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2105
2106 bnx2x_nic_init_cnic(bp);
2107
2108 /* Enable Timer scan */
2109 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2110
2111 for_each_cnic_queue(bp, i) {
2112 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2113 if (rc) {
2114 BNX2X_ERR("Queue setup failed\n");
2115 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2116 }
2117 }
2118
2119 /* Initialize Rx filter. */
2120 netif_addr_lock_bh(bp->dev);
2121 bnx2x_set_rx_mode(bp->dev);
2122 netif_addr_unlock_bh(bp->dev);
2123
2124 /* re-read iscsi info */
2125 bnx2x_get_iscsi_info(bp);
2126 bnx2x_setup_cnic_irq_info(bp);
2127 bnx2x_setup_cnic_info(bp);
2128 bp->cnic_loaded = true;
2129 if (bp->state == BNX2X_STATE_OPEN)
2130 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2131
2132
2133 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2134
2135 return 0;
2136
2137#ifndef BNX2X_STOP_ON_ERROR
2138load_error_cnic2:
2139 /* Disable Timer scan */
2140 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2141
2142load_error_cnic1:
2143 bnx2x_napi_disable_cnic(bp);
2144 /* Update the number of queues without the cnic queues */
2145 rc = bnx2x_set_real_num_queues(bp, 0);
2146 if (rc)
2147 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2148load_error_cnic0:
2149 BNX2X_ERR("CNIC-related load failed\n");
2150 bnx2x_free_fp_mem_cnic(bp);
2151 bnx2x_free_mem_cnic(bp);
2152 return rc;
2153#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2154}
2155
2156
9f6c9258
DK
2157/* must be called with rtnl_lock */
2158int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2159{
619c5cb6 2160 int port = BP_PORT(bp);
9f6c9258
DK
2161 u32 load_code;
2162 int i, rc;
2163
55c11941
MS
2164 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2165 DP(NETIF_MSG_IFUP,
2166 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2167
9f6c9258 2168#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2169 if (unlikely(bp->panic)) {
2170 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2171 return -EPERM;
51c1a580 2172 }
9f6c9258
DK
2173#endif
2174
2175 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2176
2ae17f66
VZ
2177 /* Set the initial link reported state to link down */
2178 bnx2x_acquire_phy_lock(bp);
2179 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2180 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2181 &bp->last_reported_link.link_report_flags);
2182 bnx2x_release_phy_lock(bp);
2183
523224a3
DK
2184 /* must be called before memory allocation and HW init */
2185 bnx2x_ilt_set_info(bp);
2186
6383c0b3
AE
2187 /*
2188 * Zero fastpath structures, preserving invariants like napi (which is
2189 * allocated only once), fp index, max_cos and the bp pointer.
65565884 2190 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2191 */
51c1a580 2192 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2193 for_each_queue(bp, i)
2194 bnx2x_bz_fp(bp, i);
55c11941
MS
2195 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2196 bp->num_cnic_queues) *
2197 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2198
55c11941 2199 bp->fcoe_init = false;
6383c0b3 2200
a8c94b91
VZ
2201 /* Set the receive queues buffer size */
2202 bnx2x_set_rx_buf_size(bp);
2203
d6214d7a 2204 if (bnx2x_alloc_mem(bp))
9f6c9258 2205 return -ENOMEM;
d6214d7a 2206
b3b83c3f
DK
2207 /* Since bnx2x_alloc_mem() may update
2208 * bp->num_queues, bnx2x_set_real_num_queues() should always
2209 * come after it. At this stage CNIC queues are not counted.
b3b83c3f 2210 */
55c11941 2211 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2212 if (rc) {
ec6ba945 2213 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2214 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2215 }
2216
6383c0b3
AE
2217 /* Configure multi-CoS mappings in the kernel.
2218 * This configuration may be overridden by a multi-class queue discipline
2219 * or by a DCBX negotiation result.
2220 */
2221 bnx2x_setup_tc(bp->dev, bp->max_cos);
2222
26614ba5
MS
2223 /* Add all NAPI objects */
2224 bnx2x_add_all_napi(bp);
55c11941 2225 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2226 bnx2x_napi_enable(bp);
2227
889b9af3
AE
2228 /* set pf load just before approaching the MCP */
2229 bnx2x_set_pf_load(bp);
2230
9f6c9258 2231 /* Send LOAD_REQUEST command to MCP
619c5cb6
VZ
2232 * Returns the type of LOAD command:
2233 * if it is the first port to be initialized
2234 * common blocks should be initialized, otherwise - not
2235 */
9f6c9258 2236 if (!BP_NOMCP(bp)) {
95c6c616
AE
2237 /* init fw_seq */
2238 bp->fw_seq =
2239 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2240 DRV_MSG_SEQ_NUMBER_MASK);
2241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2242
2243 /* Get current FW pulse sequence */
2244 bp->fw_drv_pulse_wr_seq =
2245 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2246 DRV_PULSE_SEQ_MASK);
2247 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2248
b884d95b
YR
2249 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2250 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9f6c9258
DK
2251 if (!load_code) {
2252 BNX2X_ERR("MCP response failure, aborting\n");
2253 rc = -EBUSY;
619c5cb6 2254 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
2255 }
2256 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
51c1a580 2257 BNX2X_ERR("Driver load refused\n");
9f6c9258 2258 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 2259 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258 2260 }
d1e2d966
AE
2261 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2262 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
d1e2d966 2263 /* abort nic load if version mismatch */
452427b0 2264 if (!bnx2x_test_firmware_version(bp, true)) {
d1e2d966
AE
2265 rc = -EBUSY;
2266 LOAD_ERROR_EXIT(bp, load_error2);
2267 }
2268 }
9f6c9258
DK
2269
2270 } else {
f2e0899f 2271 int path = BP_PATH(bp);
9f6c9258 2272
f2e0899f
DK
2273 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2274 path, load_count[path][0], load_count[path][1],
2275 load_count[path][2]);
2276 load_count[path][0]++;
2277 load_count[path][1 + port]++;
2278 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2279 path, load_count[path][0], load_count[path][1],
2280 load_count[path][2]);
2281 if (load_count[path][0] == 1)
9f6c9258 2282 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 2283 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
2284 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2285 else
2286 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2287 }
2288
2289 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 2290 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
3deb8167 2291 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
9f6c9258 2292 bp->port.pmf = 1;
3deb8167
YR
2293 /*
2294 * We need the barrier to ensure the ordering between the
2295 * writing to bp->port.pmf here and reading it from the
2296 * bnx2x_periodic_task().
2297 */
2298 smp_mb();
3deb8167 2299 } else
9f6c9258 2300 bp->port.pmf = 0;
6383c0b3 2301
51c1a580 2302 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
9f6c9258 2303
619c5cb6
VZ
2304 /* Init Function state controlling object */
2305 bnx2x__init_func_obj(bp);
2306
9f6c9258
DK
2307 /* Initialize HW */
2308 rc = bnx2x_init_hw(bp, load_code);
2309 if (rc) {
2310 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 2311 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2312 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
2313 }
2314
d6214d7a
DK
2315 /* Connect to IRQs */
2316 rc = bnx2x_setup_irqs(bp);
523224a3 2317 if (rc) {
51c1a580 2318 BNX2X_ERR("IRQs setup failed\n");
523224a3 2319 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2320 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2321 }
2322
9f6c9258
DK
2323 /* Setup NIC internals and enable interrupts */
2324 bnx2x_nic_init(bp, load_code);
2325
619c5cb6
VZ
2326 /* Init per-function objects */
2327 bnx2x_init_bp_objs(bp);
2328
f2e0899f
DK
2329 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2330 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
2331 (bp->common.shmem2_base)) {
2332 if (SHMEM2_HAS(bp, dcc_support))
2333 SHMEM2_WR(bp, dcc_support,
2334 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2335 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
a3348722
BW
2336 if (SHMEM2_HAS(bp, afex_driver_support))
2337 SHMEM2_WR(bp, afex_driver_support,
2338 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
619c5cb6
VZ
2339 }
2340
a3348722
BW
2341 /* Set AFEX default VLAN tag to an invalid value */
2342 bp->afex_def_vlan_tag = -1;
2343
619c5cb6
VZ
2344 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2345 rc = bnx2x_func_start(bp);
2346 if (rc) {
2347 BNX2X_ERR("Function start failed!\n");
c636322b 2348 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6
VZ
2349 LOAD_ERROR_EXIT(bp, load_error3);
2350 }
9f6c9258
DK
2351
2352 /* Send LOAD_DONE command to MCP */
2353 if (!BP_NOMCP(bp)) {
a22f0788 2354 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
2355 if (!load_code) {
2356 BNX2X_ERR("MCP response failure, aborting\n");
2357 rc = -EBUSY;
619c5cb6 2358 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
2359 }
2360 }
2361
619c5cb6 2362 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
2363 if (rc) {
2364 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 2365 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 2366 }
9f6c9258 2367
55c11941 2368 for_each_nondefault_eth_queue(bp, i) {
619c5cb6 2369 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
51c1a580
MS
2370 if (rc) {
2371 BNX2X_ERR("Queue setup failed\n");
55c11941 2372 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2373 }
523224a3
DK
2374 }
2375
619c5cb6 2376 rc = bnx2x_init_rss_pf(bp);
51c1a580
MS
2377 if (rc) {
2378 BNX2X_ERR("PF RSS init failed\n");
55c11941 2379 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2380 }
619c5cb6 2381
523224a3
DK
2382 /* Now when Clients are configured we are ready to work */
2383 bp->state = BNX2X_STATE_OPEN;
2384
619c5cb6
VZ
2385 /* Configure a ucast MAC */
2386 rc = bnx2x_set_eth_mac(bp, true);
51c1a580
MS
2387 if (rc) {
2388 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2389 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2390 }
6e30dd4e 2391
e3835b99
DK
2392 if (bp->pending_max) {
2393 bnx2x_update_max_mf_config(bp, bp->pending_max);
2394 bp->pending_max = 0;
2395 }
2396
9f6c9258
DK
2397 if (bp->port.pmf)
2398 bnx2x_initial_phy_init(bp, load_mode);
2399
619c5cb6
VZ
2400 /* Start fast path */
2401
2402 /* Initialize Rx filter. */
2403 netif_addr_lock_bh(bp->dev);
6e30dd4e 2404 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2405 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2406
619c5cb6 2407 /* Start the Tx */
9f6c9258
DK
2408 switch (load_mode) {
2409 case LOAD_NORMAL:
523224a3
DK
2410 /* Tx queues should only be re-enabled */
2411 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2412 break;
2413
2414 case LOAD_OPEN:
2415 netif_tx_start_all_queues(bp->dev);
523224a3 2416 smp_mb__after_clear_bit();
9f6c9258
DK
2417 break;
2418
2419 case LOAD_DIAG:
8970b2e4 2420 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2421 bp->state = BNX2X_STATE_DIAG;
2422 break;
2423
2424 default:
2425 break;
2426 }
2427
00253a8c 2428 if (bp->port.pmf)
4c704899 2429 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2430 else
9f6c9258
DK
2431 bnx2x__link_status_update(bp);
2432
2433 /* start the timer */
2434 mod_timer(&bp->timer, jiffies + bp->current_interval);
2435
55c11941
MS
2436 if (CNIC_ENABLED(bp))
2437 bnx2x_load_cnic(bp);
9f6c9258 2438
9ce392d4
YM
2439 /* mark driver is loaded in shmem2 */
2440 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2441 u32 val;
2442 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2443 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2444 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2445 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2446 }
2447
619c5cb6
VZ
2448 /* Wait for all pending SP commands to complete */
2449 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2450 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2451 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2452 return -EBUSY;
2453 }
6891dd25 2454
9876879f
BW
2455 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2456 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2457 bnx2x_dcbx_init(bp, false);
2458
55c11941
MS
2459 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2460
9f6c9258
DK
2461 return 0;
2462
619c5cb6 2463#ifndef BNX2X_STOP_ON_ERROR
9f6c9258
DK
2464load_error3:
2465 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2466
619c5cb6
VZ
2467 /* Clean queueable objects */
2468 bnx2x_squeeze_objects(bp);
2469
9f6c9258
DK
2470 /* Free SKBs, SGEs, TPA pool and driver internals */
2471 bnx2x_free_skbs(bp);
ec6ba945 2472 for_each_rx_queue(bp, i)
9f6c9258 2473 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2474
9f6c9258 2475 /* Release IRQs */
d6214d7a
DK
2476 bnx2x_free_irq(bp);
2477load_error2:
2478 if (!BP_NOMCP(bp)) {
2479 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2480 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2481 }
2482
2483 bp->port.pmf = 0;
9f6c9258
DK
2484load_error1:
2485 bnx2x_napi_disable(bp);
889b9af3
AE
2486 /* clear pf_load status, as it was already set */
2487 bnx2x_clear_pf_load(bp);
d6214d7a 2488load_error0:
9f6c9258
DK
2489 bnx2x_free_mem(bp);
2490
2491 return rc;
619c5cb6 2492#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2493}
2494
2495/* must be called with rtnl_lock */
5d07d868 2496int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2497{
2498 int i;
c9ee9206
VZ
2499 bool global = false;
2500
55c11941
MS
2501 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2502
9ce392d4
YM
2503 /* mark driver is unloaded in shmem2 */
2504 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2505 u32 val;
2506 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2507 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2508 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2509 }
2510
c9ee9206
VZ
2511 if ((bp->state == BNX2X_STATE_CLOSED) ||
2512 (bp->state == BNX2X_STATE_ERROR)) {
2513 /* We can get here if the driver has been unloaded
2514 * during parity error recovery and is either waiting for a
2515 * leader to complete or for other functions to unload and
2516 * then ifdown has been issued. In this case we want to
2517 * unload and let the other functions complete the recovery
2518 * process.
2519 */
9f6c9258
DK
2520 bp->recovery_state = BNX2X_RECOVERY_DONE;
2521 bp->is_leader = 0;
c9ee9206
VZ
2522 bnx2x_release_leader_lock(bp);
2523 smp_mb();
2524
51c1a580
MS
2525 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2526 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2527 return -EINVAL;
2528 }
2529
87b7ba3d
VZ
2530 /*
2531 * It's important to set bp->state to a value different from
2532 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2533 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2534 */
2535 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2536 smp_mb();
2537
55c11941
MS
2538 if (CNIC_LOADED(bp))
2539 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2540
9505ee37
VZ
2541 /* Stop Tx */
2542 bnx2x_tx_disable(bp);
65565884 2543 netdev_reset_tc(bp->dev);
9505ee37 2544
9f6c9258 2545 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2546
9f6c9258 2547 del_timer_sync(&bp->timer);
f85582f8 2548
619c5cb6
VZ
2549 /* Set ALWAYS_ALIVE bit in shmem */
2550 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2551
2552 bnx2x_drv_pulse(bp);
9f6c9258 2553
f85582f8 2554 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1355b704 2555 bnx2x_save_statistics(bp);
9f6c9258
DK
2556
2557 /* Cleanup the chip if needed */
2558 if (unload_mode != UNLOAD_RECOVERY)
5d07d868 2559 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2560 else {
c9ee9206
VZ
2561 /* Send the UNLOAD_REQUEST to the MCP */
2562 bnx2x_send_unload_req(bp, unload_mode);
2563
2564 /*
2565 * Prevent transactions to host from the functions on the
2566 * engine that doesn't reset global blocks in case of global
2567 * attention once gloabl blocks are reset and gates are opened
2568 * (the engine which leader will perform the recovery
2569 * last).
2570 */
2571 if (!CHIP_IS_E1x(bp))
2572 bnx2x_pf_disable(bp);
2573
2574 /* Disable HW interrupts, NAPI */
523224a3 2575 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2576 /* Delete all NAPI objects */
2577 bnx2x_del_all_napi(bp);
55c11941
MS
2578 if (CNIC_LOADED(bp))
2579 bnx2x_del_all_napi_cnic(bp);
523224a3 2580 /* Release IRQs */
d6214d7a 2581 bnx2x_free_irq(bp);
c9ee9206
VZ
2582
2583 /* Report UNLOAD_DONE to MCP */
5d07d868 2584 bnx2x_send_unload_done(bp, false);
523224a3 2585 }
9f6c9258 2586
619c5cb6
VZ
2587 /*
2588 * At this stage no more interrupts will arrive, so we may safely clean
2589 * the queueable objects here in case they failed to get cleaned so far.
2590 */
2591 bnx2x_squeeze_objects(bp);
2592
79616895
VZ
2593 /* There should be no more pending SP commands at this stage */
2594 bp->sp_state = 0;
2595
9f6c9258
DK
2596 bp->port.pmf = 0;
2597
2598 /* Free SKBs, SGEs, TPA pool and driver internals */
2599 bnx2x_free_skbs(bp);
55c11941
MS
2600 if (CNIC_LOADED(bp))
2601 bnx2x_free_skbs_cnic(bp);
ec6ba945 2602 for_each_rx_queue(bp, i)
9f6c9258 2603 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2604
55c11941
MS
2605 if (CNIC_LOADED(bp)) {
2606 bnx2x_free_fp_mem_cnic(bp);
2607 bnx2x_free_mem_cnic(bp);
2608 }
9f6c9258
DK
2609 bnx2x_free_mem(bp);
2610
2611 bp->state = BNX2X_STATE_CLOSED;
55c11941 2612 bp->cnic_loaded = false;
9f6c9258 2613
c9ee9206
VZ
2614 /* Check if there are pending parity attentions. If there are - set
2615 * RECOVERY_IN_PROGRESS.
2616 */
2617 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2618 bnx2x_set_reset_in_progress(bp);
2619
2620 /* Set RESET_IS_GLOBAL if needed */
2621 if (global)
2622 bnx2x_set_reset_global(bp);
2623 }
2624
2625
9f6c9258
DK
2626 /* The last driver must disable a "close the gate" if there is no
2627 * parity attention or "process kill" pending.
2628 */
889b9af3 2629 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2630 bnx2x_disable_close_the_gate(bp);
2631
55c11941
MS
2632 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2633
9f6c9258
DK
2634 return 0;
2635}
f85582f8 2636
9f6c9258
DK
2637int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2638{
2639 u16 pmcsr;
2640
adf5f6a1
DK
2641 /* If there is no power capability, silently succeed */
2642 if (!bp->pm_cap) {
51c1a580 2643 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2644 return 0;
2645 }
2646
9f6c9258
DK
2647 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2648
2649 switch (state) {
2650 case PCI_D0:
2651 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2652 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2653 PCI_PM_CTRL_PME_STATUS));
2654
2655 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2656 /* delay required during transition out of D3hot */
2657 msleep(20);
2658 break;
2659
2660 case PCI_D3hot:
2661 /* If there are other clients above, don't
2662 shut down the power */
2663 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2664 return 0;
2665 /* Don't shut down the power for emulation and FPGA */
2666 if (CHIP_REV_IS_SLOW(bp))
2667 return 0;
2668
2669 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2670 pmcsr |= 3;
2671
2672 if (bp->wol)
2673 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2674
2675 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2676 pmcsr);
2677
2678 /* No more memory access after this point until
2679 * device is brought back to D0.
2680 */
2681 break;
2682
2683 default:
51c1a580 2684 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
2685 return -EINVAL;
2686 }
2687 return 0;
2688}
2689
9f6c9258
DK
2690/*
2691 * net_device service functions
2692 */
d6214d7a 2693int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2694{
2695 int work_done = 0;
6383c0b3 2696 u8 cos;
9f6c9258
DK
2697 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2698 napi);
2699 struct bnx2x *bp = fp->bp;
2700
2701 while (1) {
2702#ifdef BNX2X_STOP_ON_ERROR
2703 if (unlikely(bp->panic)) {
2704 napi_complete(napi);
2705 return 0;
2706 }
2707#endif
2708
6383c0b3 2709 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
2710 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2711 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 2712
9f6c9258
DK
2713
2714 if (bnx2x_has_rx_work(fp)) {
2715 work_done += bnx2x_rx_int(fp, budget - work_done);
2716
2717 /* must not complete if we consumed full budget */
2718 if (work_done >= budget)
2719 break;
2720 }
2721
2722 /* Fall out from the NAPI loop if needed */
2723 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 2724
ec6ba945
VZ
2725 /* No need to update SB for FCoE L2 ring as long as
2726 * it's connected to the default SB and the SB
2727 * has been updated when NAPI was scheduled.
2728 */
2729 if (IS_FCOE_FP(fp)) {
2730 napi_complete(napi);
2731 break;
2732 }
9f6c9258 2733 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2734 /* bnx2x_has_rx_work() reads the status block,
2735 * thus we need to ensure that status block indices
2736 * have been actually read (bnx2x_update_fpsb_idx)
2737 * prior to this check (bnx2x_has_rx_work) so that
2738 * we won't write the "newer" value of the status block
2739 * to IGU (if there was a DMA right after
2740 * bnx2x_has_rx_work and if there is no rmb, the memory
2741 * reading (bnx2x_update_fpsb_idx) may be postponed
2742 * to right before bnx2x_ack_sb). In this case there
2743 * will never be another interrupt until there is
2744 * another update of the status block, while there
2745 * is still unhandled work.
2746 */
9f6c9258
DK
2747 rmb();
2748
2749 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2750 napi_complete(napi);
2751 /* Re-enable interrupts */
51c1a580 2752 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
2753 "Update index to %d\n", fp->fp_hc_idx);
2754 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2755 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2756 IGU_INT_ENABLE, 1);
2757 break;
2758 }
2759 }
2760 }
2761
2762 return work_done;
2763}
2764
9f6c9258
DK
2765 /* we split the first BD into header and data BDs
2766 * to ease the pain of our fellow microcode engineers;
2767 * we use one mapping for both BDs
9f6c9258
DK
2768 */
2769static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 2770 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
2771 struct sw_tx_bd *tx_buf,
2772 struct eth_tx_start_bd **tx_bd, u16 hlen,
2773 u16 bd_prod, int nbd)
2774{
2775 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2776 struct eth_tx_bd *d_tx_bd;
2777 dma_addr_t mapping;
2778 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2779
2780 /* first fix first BD */
2781 h_tx_bd->nbd = cpu_to_le16(nbd);
2782 h_tx_bd->nbytes = cpu_to_le16(hlen);
2783
51c1a580
MS
2784 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2785 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
2786
2787 /* now get a new data BD
2788 * (after the pbd) and fill it */
2789 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2790 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
2791
2792 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2793 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2794
2795 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2796 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2797 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2798
2799 /* this marks the BD as one that has no individual mapping */
2800 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2801
2802 DP(NETIF_MSG_TX_QUEUED,
2803 "TSO split data size is %d (%x:%x)\n",
2804 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2805
2806 /* update tx_bd */
2807 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2808
2809 return bd_prod;
2810}
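/* Worked example of the split above (all numbers are illustrative): for
 * an skb with skb_headlen() == 200 and a computed header length
 * hlen == 66, the start BD is trimmed to 66 bytes and a new data BD of
 * 200 - 66 = 134 bytes is added at DMA address mapping + 66.  Both BDs
 * reuse the single mapping of the linear data, which is why the data BD
 * is flagged BNX2X_TSO_SPLIT_BD and skipped when buffers are unmapped.
 */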
2811
2812static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2813{
2814 if (fix > 0)
2815 csum = (u16) ~csum_fold(csum_sub(csum,
2816 csum_partial(t_header - fix, fix, 0)));
2817
2818 else if (fix < 0)
2819 csum = (u16) ~csum_fold(csum_add(csum,
2820 csum_partial(t_header, -fix, 0)));
2821
2822 return swab16(csum);
2823}
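/* Hedged usage sketch (it mirrors the call made later in this file; the
 * surrounding context is omitted): when the HW checksum was computed
 * starting SKB_CS_OFF(skb) bytes away from the transport header, the
 * helper above adds or subtracts the partial sum of those bytes and
 * re-folds the result:
 *
 *	pbd->tcp_pseudo_csum =
 *		bnx2x_csum_fix(skb_transport_header(skb),
 *			       SKB_CS(skb), SKB_CS_OFF(skb));
 */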
2824
2825static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2826{
2827 u32 rc;
2828
2829 if (skb->ip_summed != CHECKSUM_PARTIAL)
2830 rc = XMIT_PLAIN;
2831
2832 else {
d0d9d8ef 2833 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2834 rc = XMIT_CSUM_V6;
2835 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2836 rc |= XMIT_CSUM_TCP;
2837
2838 } else {
2839 rc = XMIT_CSUM_V4;
2840 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2841 rc |= XMIT_CSUM_TCP;
2842 }
2843 }
2844
5892b9e9
VZ
2845 if (skb_is_gso_v6(skb))
2846 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2847 else if (skb_is_gso(skb))
2848 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2849
2850 return rc;
2851}
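/* Minimal sketch of how the returned flag word is consumed (this mirrors
 * the checks made in bnx2x_start_xmit() further below; nothing new is
 * introduced):
 *
 *	u32 xmit_type = bnx2x_xmit_type(bp, skb);
 *
 *	if (xmit_type & XMIT_CSUM)
 *		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 *	if (xmit_type & XMIT_GSO)
 *		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 */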
2852
2853#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2854 /* check if the packet requires linearization (packet is too fragmented);
2855 no need to check fragmentation if page size > 8K (there will be no
2856 violation of FW restrictions) */
2857static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2858 u32 xmit_type)
2859{
2860 int to_copy = 0;
2861 int hlen = 0;
2862 int first_bd_sz = 0;
2863
2864 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2865 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2866
2867 if (xmit_type & XMIT_GSO) {
2868 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2869 /* Check if LSO packet needs to be copied:
2870 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2871 int wnd_size = MAX_FETCH_BD - 3;
2872 /* Number of windows to check */
2873 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2874 int wnd_idx = 0;
2875 int frag_idx = 0;
2876 u32 wnd_sum = 0;
2877
2878 /* Headers length */
2879 hlen = (int)(skb_transport_header(skb) - skb->data) +
2880 tcp_hdrlen(skb);
2881
2882 /* Amount of data (w/o headers) on linear part of SKB */
2883 first_bd_sz = skb_headlen(skb) - hlen;
2884
2885 wnd_sum = first_bd_sz;
2886
2887 /* Calculate the first sum - it's special */
2888 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2889 wnd_sum +=
9e903e08 2890 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
2891
2892 /* If there was data on linear skb data - check it */
2893 if (first_bd_sz > 0) {
2894 if (unlikely(wnd_sum < lso_mss)) {
2895 to_copy = 1;
2896 goto exit_lbl;
2897 }
2898
2899 wnd_sum -= first_bd_sz;
2900 }
2901
2902 /* Others are easier: run through the frag list and
2903 check all windows */
2904 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2905 wnd_sum +=
9e903e08 2906 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
2907
2908 if (unlikely(wnd_sum < lso_mss)) {
2909 to_copy = 1;
2910 break;
2911 }
2912 wnd_sum -=
9e903e08 2913 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
2914 }
2915 } else {
2916 /* in the non-LSO case a too-fragmented packet should always
2917 be linearized */
2918 to_copy = 1;
2919 }
2920 }
2921
2922exit_lbl:
2923 if (unlikely(to_copy))
2924 DP(NETIF_MSG_TX_QUEUED,
51c1a580 2925 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
2926 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2927 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2928
2929 return to_copy;
2930}
2931#endif
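/* Self-contained sketch of the sliding-window rule enforced above; it is
 * for illustration only (plain ints instead of skb_frag_t, and the
 * function name is made up) and is not part of the driver.  Every window
 * of wnd_size consecutive BDs must carry at least lso_mss bytes,
 * otherwise the skb has to be linearized.
 */
static int example_lso_needs_linearize(const unsigned int *frag_sz,
				       int nr_frags, unsigned int first_bd_sz,
				       unsigned int lso_mss, int wnd_size)
{
	unsigned int wnd_sum = first_bd_sz;
	int i;

	if (nr_frags < wnd_size)	/* everything fits in one window */
		return 0;

	/* first window: linear data plus the first (wnd_size - 1) frags */
	for (i = 0; i < wnd_size - 1; i++)
		wnd_sum += frag_sz[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= first_bd_sz;
	}

	/* slide a window of wnd_size fragments across the rest */
	for (i = 0; i + wnd_size - 1 < nr_frags; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < lso_mss)
			return 1;
		wnd_sum -= frag_sz[i];
	}

	return 0;
}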
2932
2297a2da
VZ
2933static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2934 u32 xmit_type)
f2e0899f 2935{
2297a2da
VZ
2936 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2937 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2938 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2939 if ((xmit_type & XMIT_GSO_V6) &&
2940 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2941 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2942}
2943
2944/**
e8920674 2945 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2946 *
e8920674
DK
2947 * @skb: packet skb
2948 * @pbd: parse BD
2949 * @xmit_type: xmit flags
f2e0899f
DK
2950 */
2951static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2952 struct eth_tx_parse_bd_e1x *pbd,
2953 u32 xmit_type)
2954{
2955 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2956 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2957 pbd->tcp_flags = pbd_tcp_flags(skb);
2958
2959 if (xmit_type & XMIT_GSO_V4) {
2960 pbd->ip_id = swab16(ip_hdr(skb)->id);
2961 pbd->tcp_pseudo_csum =
2962 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2963 ip_hdr(skb)->daddr,
2964 0, IPPROTO_TCP, 0));
2965
2966 } else
2967 pbd->tcp_pseudo_csum =
2968 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2969 &ipv6_hdr(skb)->daddr,
2970 0, IPPROTO_TCP, 0));
2971
2972 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2973}
f85582f8 2974
f2e0899f 2975/**
e8920674 2976 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2977 *
e8920674
DK
2978 * @bp: driver handle
2979 * @skb: packet skb
2980 * @parsing_data: data to be updated
2981 * @xmit_type: xmit flags
f2e0899f 2982 *
e8920674 2983 * 57712 related
f2e0899f
DK
2984 */
2985static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2986 u32 *parsing_data, u32 xmit_type)
f2e0899f 2987{
e39aece7
VZ
2988 *parsing_data |=
2989 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2990 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2991 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2992
e39aece7
VZ
2993 if (xmit_type & XMIT_CSUM_TCP) {
2994 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2995 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2996 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2997
e39aece7
VZ
2998 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2999 } else
3000 /* We support checksum offload for TCP and UDP only.
3001 * No need to pass the UDP header length - it's a constant.
3002 */
3003 return skb_transport_header(skb) +
3004 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3005}
3006
93ef5c02
DK
3007static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3008 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3009{
93ef5c02
DK
3010 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3011
3012 if (xmit_type & XMIT_CSUM_V4)
3013 tx_start_bd->bd_flags.as_bitfield |=
3014 ETH_TX_BD_FLAGS_IP_CSUM;
3015 else
3016 tx_start_bd->bd_flags.as_bitfield |=
3017 ETH_TX_BD_FLAGS_IPV6;
3018
3019 if (!(xmit_type & XMIT_CSUM_TCP))
3020 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3021}
3022
f2e0899f 3023/**
e8920674 3024 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3025 *
e8920674
DK
3026 * @bp: driver handle
3027 * @skb: packet skb
3028 * @pbd: parse BD to be updated
3029 * @xmit_type: xmit flags
f2e0899f
DK
3030 */
3031static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3032 struct eth_tx_parse_bd_e1x *pbd,
3033 u32 xmit_type)
3034{
e39aece7 3035 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3036
3037 /* for now NS flag is not used in Linux */
3038 pbd->global_data =
3039 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3040 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3041
3042 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3043 skb_network_header(skb)) >> 1;
f2e0899f 3044
e39aece7
VZ
3045 hlen += pbd->ip_hlen_w;
3046
3047 /* We support checksum offload for TCP and UDP only */
3048 if (xmit_type & XMIT_CSUM_TCP)
3049 hlen += tcp_hdrlen(skb) / 2;
3050 else
3051 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3052
3053 pbd->total_hlen_w = cpu_to_le16(hlen);
3054 hlen = hlen*2;
3055
3056 if (xmit_type & XMIT_CSUM_TCP) {
3057 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3058
3059 } else {
3060 s8 fix = SKB_CS_OFF(skb); /* signed! */
3061
3062 DP(NETIF_MSG_TX_QUEUED,
3063 "hlen %d fix %d csum before fix %x\n",
3064 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3065
3066 /* HW bug: fixup the CSUM */
3067 pbd->tcp_pseudo_csum =
3068 bnx2x_csum_fix(skb_transport_header(skb),
3069 SKB_CS(skb), fix);
3070
3071 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3072 pbd->tcp_pseudo_csum);
3073 }
3074
3075 return hlen;
3076}
f85582f8 3077
9f6c9258
DK
3078/* called with netif_tx_lock
3079 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3080 * netif_wake_queue()
3081 */
3082netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3083{
3084 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3085
9f6c9258 3086 struct netdev_queue *txq;
6383c0b3 3087 struct bnx2x_fp_txdata *txdata;
9f6c9258 3088 struct sw_tx_bd *tx_buf;
619c5cb6 3089 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3090 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3091 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3092 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 3093 u32 pbd_e2_parsing_data = 0;
9f6c9258 3094 u16 pkt_prod, bd_prod;
65565884 3095 int nbd, txq_index;
9f6c9258
DK
3096 dma_addr_t mapping;
3097 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3098 int i;
3099 u8 hlen = 0;
3100 __le16 pkt_size = 0;
3101 struct ethhdr *eth;
3102 u8 mac_type = UNICAST_ADDRESS;
3103
3104#ifdef BNX2X_STOP_ON_ERROR
3105 if (unlikely(bp->panic))
3106 return NETDEV_TX_BUSY;
3107#endif
3108
6383c0b3
AE
3109 txq_index = skb_get_queue_mapping(skb);
3110 txq = netdev_get_tx_queue(dev, txq_index);
3111
55c11941 3112 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3113
65565884 3114 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3115
3116 /* enable this debug print to view the transmission queue being used
51c1a580 3117 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3118 txq_index, fp_index, txdata_index); */
9f6c9258 3119
6383c0b3 3120 /* enable this debug print to view the transmission details
51c1a580
MS
3121 DP(NETIF_MSG_TX_QUEUED,
3122 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3123 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3124
6383c0b3 3125 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3126 skb_shinfo(skb)->nr_frags +
3127 BDS_PER_TX_PKT +
3128 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa
DK
3129 /* Handle special storage cases separately */
3130 if (txdata->tx_ring_size != 0) {
3131 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3132 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3133 netif_tx_stop_queue(txq);
3134 }
3135
9f6c9258
DK
3136 return NETDEV_TX_BUSY;
3137 }
3138
51c1a580
MS
3139 DP(NETIF_MSG_TX_QUEUED,
3140 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 3141 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
3142 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3143
3144 eth = (struct ethhdr *)skb->data;
3145
3146 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3147 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3148 if (is_broadcast_ether_addr(eth->h_dest))
3149 mac_type = BROADCAST_ADDRESS;
3150 else
3151 mac_type = MULTICAST_ADDRESS;
3152 }
3153
3154#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3155 /* First, check if we need to linearize the skb (due to FW
3156 restrictions). No need to check fragmentation if page size > 8K
3157 (there will be no violation of FW restrictions) */
3158 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3159 /* Statistics of linearization */
3160 bp->lin_cnt++;
3161 if (skb_linearize(skb) != 0) {
51c1a580
MS
3162 DP(NETIF_MSG_TX_QUEUED,
3163 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3164 dev_kfree_skb_any(skb);
3165 return NETDEV_TX_OK;
3166 }
3167 }
3168#endif
619c5cb6
VZ
3169 /* Map skb linear data for DMA */
3170 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3171 skb_headlen(skb), DMA_TO_DEVICE);
3172 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3173 DP(NETIF_MSG_TX_QUEUED,
3174 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3175 dev_kfree_skb_any(skb);
3176 return NETDEV_TX_OK;
3177 }
9f6c9258
DK
3178 /*
3179 Please read carefully. First we use one BD which we mark as start,
3180 then we have a parsing info BD (used for TSO or xsum),
3181 and only then we have the rest of the TSO BDs.
3182 (don't forget to mark the last one as last,
3183 and to unmap only AFTER you write to the BD ...)
3184 And above all, all PBD sizes are in words - NOT DWORDS!
3185 */
3186
619c5cb6
VZ
3187 /* get current pkt produced now - advance it just before sending packet
3188 * since mapping of pages may fail and cause packet to be dropped
3189 */
6383c0b3
AE
3190 pkt_prod = txdata->tx_pkt_prod;
3191 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3192
619c5cb6
VZ
3193 /* get a tx_buf and first BD
3194 * tx_start_bd may be changed during SPLIT,
3195 * but first_bd will always stay first
3196 */
6383c0b3
AE
3197 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3198 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3199 first_bd = tx_start_bd;
9f6c9258
DK
3200
3201 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
96bed4b9
YM
3202 SET_FLAG(tx_start_bd->general_data,
3203 ETH_TX_START_BD_PARSE_NBDS,
3204 0);
f85582f8 3205
9f6c9258 3206 /* header nbd */
f85582f8 3207 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
3208
3209 /* remember the first BD of the packet */
6383c0b3 3210 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3211 tx_buf->skb = skb;
3212 tx_buf->flags = 0;
3213
3214 DP(NETIF_MSG_TX_QUEUED,
3215 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3216 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3217
eab6d18d 3218 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3219 tx_start_bd->vlan_or_ethertype =
3220 cpu_to_le16(vlan_tx_tag_get(skb));
3221 tx_start_bd->bd_flags.as_bitfield |=
3222 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 3223 } else
523224a3 3224 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
3225
3226 /* turn on parsing and get a BD */
3227 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3228
93ef5c02
DK
3229 if (xmit_type & XMIT_CSUM)
3230 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3231
619c5cb6 3232 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3233 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
3234 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3235 /* Set PBD in checksum offload case */
3236 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
3237 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3238 &pbd_e2_parsing_data,
3239 xmit_type);
619c5cb6
VZ
3240 if (IS_MF_SI(bp)) {
3241 /*
3242 * fill in the MAC addresses in the PBD - for local
3243 * switching
3244 */
3245 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3246 &pbd_e2->src_mac_addr_mid,
3247 &pbd_e2->src_mac_addr_lo,
3248 eth->h_source);
3249 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3250 &pbd_e2->dst_mac_addr_mid,
3251 &pbd_e2->dst_mac_addr_lo,
3252 eth->h_dest);
3253 }
96bed4b9
YM
3254
3255 SET_FLAG(pbd_e2_parsing_data,
3256 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3257 } else {
96bed4b9 3258 u16 global_data = 0;
6383c0b3 3259 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3260 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3261 /* Set PBD in checksum offload case */
3262 if (xmit_type & XMIT_CSUM)
3263 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3264
96bed4b9
YM
3265 SET_FLAG(global_data,
3266 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3267 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3268 }
3269
f85582f8 3270 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3271 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3272 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 3273 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
3274 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3275 pkt_size = tx_start_bd->nbytes;
3276
51c1a580
MS
3277 DP(NETIF_MSG_TX_QUEUED,
3278 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
3279 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3280 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3281 tx_start_bd->bd_flags.as_bitfield,
3282 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3283
3284 if (xmit_type & XMIT_GSO) {
3285
3286 DP(NETIF_MSG_TX_QUEUED,
3287 "TSO packet len %d hlen %d total len %d tso size %d\n",
3288 skb->len, hlen, skb_headlen(skb),
3289 skb_shinfo(skb)->gso_size);
3290
3291 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3292
3293 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
3294 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3295 &tx_start_bd, hlen,
3296 bd_prod, ++nbd);
619c5cb6 3297 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3298 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3299 xmit_type);
f2e0899f
DK
3300 else
3301 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3302 }
2297a2da
VZ
3303
3304 /* Set the PBD's parsing_data field if not zero
3305 * (for the chips newer than 57711).
3306 */
3307 if (pbd_e2_parsing_data)
3308 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3309
9f6c9258
DK
3310 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3311
f85582f8 3312 /* Handle fragmented skb */
9f6c9258
DK
3313 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3314 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3315
9e903e08
ED
3316 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3317 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3318 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3319 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3320
51c1a580
MS
3321 DP(NETIF_MSG_TX_QUEUED,
3322 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3323
3324 /* we need unmap all buffers already mapped
3325 * for this SKB;
3326 * first_bd->nbd need to be properly updated
3327 * before call to bnx2x_free_tx_pkt
3328 */
3329 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3330 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3331 TX_BD(txdata->tx_pkt_prod),
3332 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3333 return NETDEV_TX_OK;
3334 }
3335
9f6c9258 3336 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3337 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3338 if (total_pkt_bd == NULL)
6383c0b3 3339 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3340
9f6c9258
DK
3341 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3342 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3343 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3344 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3345 nbd++;
9f6c9258
DK
3346
3347 DP(NETIF_MSG_TX_QUEUED,
3348 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3349 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3350 le16_to_cpu(tx_data_bd->nbytes));
3351 }
3352
3353 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3354
619c5cb6
VZ
3355 /* update with actual num BDs */
3356 first_bd->nbd = cpu_to_le16(nbd);
3357
9f6c9258
DK
3358 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3359
3360 /* now send a tx doorbell, counting the next BD
3361 * if the packet contains or ends with it
3362 */
3363 if (TX_BD_POFF(bd_prod) < nbd)
3364 nbd++;
3365
619c5cb6
VZ
3366 /* total_pkt_bytes should be set on the first data BD if
3367 * it's not an LSO packet and there is more than one
3368 * data BD. In this case pkt_size is limited by the MTU value.
3369 * However we prefer to set it for an LSO packet as well (while we don't
3370 * have to) in order to save some CPU cycles in the non-LSO
3371 * case, which we care much more about.
3372 */
9f6c9258
DK
3373 if (total_pkt_bd != NULL)
3374 total_pkt_bd->total_pkt_bytes = pkt_size;
3375
523224a3 3376 if (pbd_e1x)
9f6c9258 3377 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3378 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3379 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3380 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3381 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3382 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3383 if (pbd_e2)
3384 DP(NETIF_MSG_TX_QUEUED,
3385 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3386 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3387 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3388 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3389 pbd_e2->parsing_data);
9f6c9258
DK
3390 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3391
2df1a70a
TH
3392 netdev_tx_sent_queue(txq, skb->len);
3393
8373c57d
WB
3394 skb_tx_timestamp(skb);
3395
6383c0b3 3396 txdata->tx_pkt_prod++;
9f6c9258
DK
3397 /*
3398 * Make sure that the BD data is updated before updating the producer
3399 * since FW might read the BD right after the producer is updated.
3400 * This is only applicable for weak-ordered memory model archs such
3401 * as IA-64. The following barrier is also mandatory since the FW
3402 * assumes packets must have BDs.
3403 */
3404 wmb();
3405
6383c0b3 3406 txdata->tx_db.data.prod += nbd;
9f6c9258 3407 barrier();
f85582f8 3408
6383c0b3 3409 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3410
3411 mmiowb();
3412
6383c0b3 3413 txdata->tx_bd_prod += nbd;
9f6c9258 3414
7df2dc6b 3415 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3416 netif_tx_stop_queue(txq);
3417
3418 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3419 * ordering of set_bit() in netif_tx_stop_queue() and read of
3420 * fp->bd_tx_cons */
3421 smp_mb();
3422
15192a8c 3423 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3424 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3425 netif_tx_wake_queue(txq);
3426 }
6383c0b3 3427 txdata->tx_pkt++;
9f6c9258
DK
3428
3429 return NETDEV_TX_OK;
3430}
f85582f8 3431
6383c0b3
AE
3432/**
3433 * bnx2x_setup_tc - routine to configure net_device for multi tc
3434 *
3435 * @netdev: net device to configure
3436 * @tc: number of traffic classes to enable
3437 *
3438 * callback connected to the ndo_setup_tc function pointer
3439 */
3440int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3441{
3442 int cos, prio, count, offset;
3443 struct bnx2x *bp = netdev_priv(dev);
3444
3445 /* setup tc must be called under rtnl lock */
3446 ASSERT_RTNL();
3447
3448 /* no traffic classes requested. aborting */
3449 if (!num_tc) {
3450 netdev_reset_tc(dev);
3451 return 0;
3452 }
3453
3454 /* requested to support too many traffic classes */
3455 if (num_tc > bp->max_cos) {
51c1a580
MS
3456 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3457 num_tc, bp->max_cos);
6383c0b3
AE
3458 return -EINVAL;
3459 }
3460
3461 /* declare amount of supported traffic classes */
3462 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3463 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3464 return -EINVAL;
3465 }
3466
3467 /* configure priority to traffic class mapping */
3468 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3469 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3470 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3471 "mapping priority %d to tc %d\n",
6383c0b3
AE
3472 prio, bp->prio_to_cos[prio]);
3473 }
3474
3475
 3476	/* Use this configuration to differentiate tc0 from other COSes
 3477	   This can be used for ETS or PFC, and saves the effort of setting
 3478	   up a multi-class queue disc or negotiating DCBX with a switch
3479 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3480 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3481 for (prio = 1; prio < 16; prio++) {
3482 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3483 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3484 } */
3485
3486 /* configure traffic class to transmission queue mapping */
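	/* e.g. (hypothetical numbers, assuming no CNIC queues): with 4 ETH
	 * queues and max_cos = 3, tc0 maps to txqs 0-3, tc1 to txqs 4-7 and
	 * tc2 to txqs 8-11
	 */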
3487 for (cos = 0; cos < bp->max_cos; cos++) {
3488 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3489 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3490 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3491 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3492 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3493 cos, offset, count);
3494 }
3495
3496 return 0;
3497}
3498
9f6c9258
DK
3499/* called with rtnl_lock */
3500int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3501{
3502 struct sockaddr *addr = p;
3503 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3504 int rc = 0;
9f6c9258 3505
51c1a580
MS
3506 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3507 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3508 return -EINVAL;
51c1a580 3509 }
614c76df 3510
a3348722
BW
3511 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3512 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3513 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3514 return -EINVAL;
51c1a580 3515 }
9f6c9258 3516
619c5cb6
VZ
3517 if (netif_running(dev)) {
3518 rc = bnx2x_set_eth_mac(bp, false);
3519 if (rc)
3520 return rc;
3521 }
3522
7ce5d222 3523 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
9f6c9258 3524 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3525
523224a3 3526 if (netif_running(dev))
619c5cb6 3527 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3528
619c5cb6 3529 return rc;
9f6c9258
DK
3530}
3531
b3b83c3f
DK
3532static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3533{
3534 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3535 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3536 u8 cos;
b3b83c3f
DK
3537
3538 /* Common */
55c11941 3539
b3b83c3f
DK
3540 if (IS_FCOE_IDX(fp_index)) {
3541 memset(sb, 0, sizeof(union host_hc_status_block));
3542 fp->status_blk_mapping = 0;
b3b83c3f 3543 } else {
b3b83c3f 3544 /* status blocks */
619c5cb6 3545 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3546 BNX2X_PCI_FREE(sb->e2_sb,
3547 bnx2x_fp(bp, fp_index,
3548 status_blk_mapping),
3549 sizeof(struct host_hc_status_block_e2));
3550 else
3551 BNX2X_PCI_FREE(sb->e1x_sb,
3552 bnx2x_fp(bp, fp_index,
3553 status_blk_mapping),
3554 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3555 }
55c11941 3556
b3b83c3f
DK
3557 /* Rx */
3558 if (!skip_rx_queue(bp, fp_index)) {
3559 bnx2x_free_rx_bds(fp);
3560
3561 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3562 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3563 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3564 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3565 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3566
3567 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3568 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3569 sizeof(struct eth_fast_path_rx_cqe) *
3570 NUM_RCQ_BD);
3571
3572 /* SGE ring */
3573 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3574 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3575 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3576 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3577 }
3578
3579 /* Tx */
3580 if (!skip_tx_queue(bp, fp_index)) {
3581 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3582 for_each_cos_in_tx_queue(fp, cos) {
65565884 3583 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3584
51c1a580 3585 DP(NETIF_MSG_IFDOWN,
94f05b0f 3586 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3587 fp_index, cos, txdata->cid);
3588
3589 BNX2X_FREE(txdata->tx_buf_ring);
3590 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3591 txdata->tx_desc_mapping,
3592 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3593 }
b3b83c3f
DK
3594 }
3595 /* end of fastpath */
3596}
3597
55c11941
MS
3598void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3599{
3600 int i;
3601 for_each_cnic_queue(bp, i)
3602 bnx2x_free_fp_mem_at(bp, i);
3603}
3604
b3b83c3f
DK
3605void bnx2x_free_fp_mem(struct bnx2x *bp)
3606{
3607 int i;
55c11941 3608 for_each_eth_queue(bp, i)
b3b83c3f
DK
3609 bnx2x_free_fp_mem_at(bp, i);
3610}
3611
1191cb83 3612static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3613{
3614 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3615 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3616 bnx2x_fp(bp, index, sb_index_values) =
3617 (__le16 *)status_blk.e2_sb->sb.index_values;
3618 bnx2x_fp(bp, index, sb_running_index) =
3619 (__le16 *)status_blk.e2_sb->sb.running_index;
3620 } else {
3621 bnx2x_fp(bp, index, sb_index_values) =
3622 (__le16 *)status_blk.e1x_sb->sb.index_values;
3623 bnx2x_fp(bp, index, sb_running_index) =
3624 (__le16 *)status_blk.e1x_sb->sb.running_index;
3625 }
3626}
3627
1191cb83
ED
3628/* Returns the number of actually allocated BDs */
3629static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3630 int rx_ring_size)
3631{
3632 struct bnx2x *bp = fp->bp;
3633 u16 ring_prod, cqe_ring_prod;
3634 int i, failure_cnt = 0;
3635
3636 fp->rx_comp_cons = 0;
3637 cqe_ring_prod = ring_prod = 0;
3638
 3639	/* This routine is called only during init, so
3640 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3641 */
3642 for (i = 0; i < rx_ring_size; i++) {
3643 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3644 failure_cnt++;
3645 continue;
3646 }
3647 ring_prod = NEXT_RX_IDX(ring_prod);
3648 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3649 WARN_ON(ring_prod <= (i - failure_cnt));
3650 }
3651
3652 if (failure_cnt)
3653 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3654 i - failure_cnt, fp->index);
3655
3656 fp->rx_bd_prod = ring_prod;
3657 /* Limit the CQE producer by the CQE ring size */
3658 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3659 cqe_ring_prod);
3660 fp->rx_pkt = fp->rx_calls = 0;
3661
15192a8c 3662 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
3663
3664 return i - failure_cnt;
3665}
3666
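/* Chain the RCQ pages into a ring: the last CQE of each page is used as a
 * "next page" pointer, and the last page points back to the first.
 */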
3667static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3668{
3669 int i;
3670
3671 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3672 struct eth_rx_cqe_next_page *nextpg;
3673
3674 nextpg = (struct eth_rx_cqe_next_page *)
3675 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3676 nextpg->addr_hi =
3677 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3678 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3679 nextpg->addr_lo =
3680 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3681 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3682 }
3683}
3684
b3b83c3f
DK
3685static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3686{
3687 union host_hc_status_block *sb;
3688 struct bnx2x_fastpath *fp = &bp->fp[index];
3689 int ring_size = 0;
6383c0b3 3690 u8 cos;
c2188952 3691 int rx_ring_size = 0;
b3b83c3f 3692
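	/* Choose the Rx ring size: honour an explicit bp->rx_ring_size when
	 * set; otherwise use the bare minimum for storage-only (iSCSI/FCoE)
	 * functions, or derive a per-queue default and clamp it to the
	 * FW-required minimum (reduced for 1G SerDes ports on E3).
	 */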
a3348722
BW
3693 if (!bp->rx_ring_size &&
3694 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
3695 rx_ring_size = MIN_RX_SIZE_NONTPA;
3696 bp->rx_ring_size = rx_ring_size;
55c11941 3697 } else if (!bp->rx_ring_size) {
c2188952
VZ
3698 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3699
065f8b92
YM
3700 if (CHIP_IS_E3(bp)) {
3701 u32 cfg = SHMEM_RD(bp,
3702 dev_info.port_hw_config[BP_PORT(bp)].
3703 default_cfg);
3704
3705 /* Decrease ring size for 1G functions */
3706 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3707 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3708 rx_ring_size /= 10;
3709 }
d760fc37 3710
c2188952
VZ
3711 /* allocate at least number of buffers required by FW */
3712 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3713 MIN_RX_SIZE_TPA, rx_ring_size);
3714
3715 bp->rx_ring_size = rx_ring_size;
614c76df 3716 } else /* if rx_ring_size specified - use it */
c2188952 3717 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3718
b3b83c3f
DK
3719 /* Common */
3720 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 3721
b3b83c3f 3722 if (!IS_FCOE_IDX(index)) {
b3b83c3f 3723 /* status blocks */
619c5cb6 3724 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3725 BNX2X_PCI_ALLOC(sb->e2_sb,
3726 &bnx2x_fp(bp, index, status_blk_mapping),
3727 sizeof(struct host_hc_status_block_e2));
3728 else
3729 BNX2X_PCI_ALLOC(sb->e1x_sb,
3730 &bnx2x_fp(bp, index, status_blk_mapping),
3731 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3732 }
8eef2af1
DK
3733
3734 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3735 * set shortcuts for it.
3736 */
3737 if (!IS_FCOE_IDX(index))
3738 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3739
3740 /* Tx */
3741 if (!skip_tx_queue(bp, index)) {
3742 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3743 for_each_cos_in_tx_queue(fp, cos) {
65565884 3744 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3745
51c1a580
MS
3746 DP(NETIF_MSG_IFUP,
3747 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
3748 index, cos);
3749
3750 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 3751 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
3752 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3753 &txdata->tx_desc_mapping,
b3b83c3f 3754 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 3755 }
b3b83c3f
DK
3756 }
3757
3758 /* Rx */
3759 if (!skip_rx_queue(bp, index)) {
3760 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3761 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3762 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3763 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3764 &bnx2x_fp(bp, index, rx_desc_mapping),
3765 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3766
3767 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3768 &bnx2x_fp(bp, index, rx_comp_mapping),
3769 sizeof(struct eth_fast_path_rx_cqe) *
3770 NUM_RCQ_BD);
3771
3772 /* SGE ring */
3773 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3774 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3775 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3776 &bnx2x_fp(bp, index, rx_sge_mapping),
3777 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3778 /* RX BD ring */
3779 bnx2x_set_next_page_rx_bd(fp);
3780
3781 /* CQ ring */
3782 bnx2x_set_next_page_rx_cq(fp);
3783
3784 /* BDs */
3785 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3786 if (ring_size < rx_ring_size)
3787 goto alloc_mem_err;
3788 }
3789
3790 return 0;
3791
3792/* handles low memory cases */
3793alloc_mem_err:
3794 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3795 index, ring_size);
 3796	/* FW will drop all packets if the queue is not big enough;
 3797	 * in these cases we disable the queue.
6383c0b3 3798 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
3799 */
3800 if (ring_size < (fp->disable_tpa ?
eb722d7a 3801 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
3802 /* release memory allocated for this queue */
3803 bnx2x_free_fp_mem_at(bp, index);
3804 return -ENOMEM;
3805 }
3806 return 0;
3807}
3808
55c11941
MS
3809int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3810{
3811 if (!NO_FCOE(bp))
3812 /* FCoE */
3813 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 3814		/* we will fail the load process instead of marking
3815 * NO_FCOE_FLAG
3816 */
3817 return -ENOMEM;
3818
3819 return 0;
3820}
3821
b3b83c3f
DK
3822int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3823{
3824 int i;
3825
55c11941
MS
3826 /* 1. Allocate FP for leading - fatal if error
3827 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
3828 */
3829
3830 /* leading */
3831 if (bnx2x_alloc_fp_mem_at(bp, 0))
3832 return -ENOMEM;
6383c0b3 3833
b3b83c3f
DK
3834 /* RSS */
3835 for_each_nondefault_eth_queue(bp, i)
3836 if (bnx2x_alloc_fp_mem_at(bp, i))
3837 break;
3838
3839 /* handle memory failures */
3840 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3841 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3842
3843 WARN_ON(delta < 0);
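		/* e.g. (hypothetical numbers): if 8 ETH queues were planned but
		 * only 5 could be allocated, delta is 3 and the FCoE fastpath,
		 * which sits right after the ETH queues, is shifted down by 3
		 * while the queue counts below are reduced accordingly.
		 */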
55c11941
MS
3844 if (CNIC_SUPPORT(bp))
3845 /* move non eth FPs next to last eth FP
3846 * must be done in that order
3847 * FCOE_IDX < FWD_IDX < OOO_IDX
3848 */
b3b83c3f 3849
55c11941
MS
 3850			/* move the FCoE fp even if NO_FCOE_FLAG is on */
3851 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3852 bp->num_ethernet_queues -= delta;
3853 bp->num_queues = bp->num_ethernet_queues +
3854 bp->num_cnic_queues;
b3b83c3f
DK
3855 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3856 bp->num_queues + delta, bp->num_queues);
3857 }
3858
3859 return 0;
3860}
d6214d7a 3861
523224a3
DK
3862void bnx2x_free_mem_bp(struct bnx2x *bp)
3863{
15192a8c 3864 kfree(bp->fp->tpa_info);
523224a3 3865 kfree(bp->fp);
15192a8c
BW
3866 kfree(bp->sp_objs);
3867 kfree(bp->fp_stats);
65565884 3868 kfree(bp->bnx2x_txq);
523224a3
DK
3869 kfree(bp->msix_table);
3870 kfree(bp->ilt);
3871}
3872
3873int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3874{
3875 struct bnx2x_fastpath *fp;
3876 struct msix_entry *tbl;
3877 struct bnx2x_ilt *ilt;
6383c0b3 3878 int msix_table_size = 0;
55c11941 3879 int fp_array_size, txq_array_size;
15192a8c 3880 int i;
6383c0b3
AE
3881
3882 /*
 3883	 * The biggest MSI-X table we might need is the maximum number of fast
 3884	 * path IGU SBs plus the default SB (for PF).
3885 */
3886 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3887
6383c0b3 3888 /* fp array: RSS plus CNIC related L2 queues */
55c11941 3889 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
15192a8c
BW
3890 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3891
3892 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
3893 if (!fp)
3894 goto alloc_err;
15192a8c
BW
3895 for (i = 0; i < fp_array_size; i++) {
3896 fp[i].tpa_info =
3897 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
3898 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
3899 if (!(fp[i].tpa_info))
3900 goto alloc_err;
3901 }
3902
523224a3
DK
3903 bp->fp = fp;
3904
15192a8c
BW
3905 /* allocate sp objs */
3906 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
3907 GFP_KERNEL);
3908 if (!bp->sp_objs)
3909 goto alloc_err;
3910
3911 /* allocate fp_stats */
3912 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
3913 GFP_KERNEL);
3914 if (!bp->fp_stats)
3915 goto alloc_err;
3916
65565884 3917 /* Allocate memory for the transmission queues array */
55c11941
MS
3918 txq_array_size =
3919 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3920 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3921
3922 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3923 GFP_KERNEL);
65565884
MS
3924 if (!bp->bnx2x_txq)
3925 goto alloc_err;
3926
523224a3 3927 /* msix table */
01e23742 3928 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3929 if (!tbl)
3930 goto alloc_err;
3931 bp->msix_table = tbl;
3932
3933 /* ilt */
3934 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3935 if (!ilt)
3936 goto alloc_err;
3937 bp->ilt = ilt;
3938
3939 return 0;
3940alloc_err:
3941 bnx2x_free_mem_bp(bp);
3942 return -ENOMEM;
3943
3944}
3945
a9fccec7 3946int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3947{
3948 struct bnx2x *bp = netdev_priv(dev);
3949
3950 if (unlikely(!netif_running(dev)))
3951 return 0;
3952
5d07d868 3953 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
3954 return bnx2x_nic_load(bp, LOAD_NORMAL);
3955}
3956
1ac9e428
YR
3957int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3958{
3959 u32 sel_phy_idx = 0;
3960 if (bp->link_params.num_phys <= 1)
3961 return INT_PHY;
3962
3963 if (bp->link_vars.link_up) {
3964 sel_phy_idx = EXT_PHY1;
3965 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3966 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3967 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3968 sel_phy_idx = EXT_PHY2;
3969 } else {
3970
3971 switch (bnx2x_phy_selection(&bp->link_params)) {
3972 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3973 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3974 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3975 sel_phy_idx = EXT_PHY1;
3976 break;
3977 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3978 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3979 sel_phy_idx = EXT_PHY2;
3980 break;
3981 }
3982 }
3983
3984 return sel_phy_idx;
3985
3986}
3987int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3988{
3989 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3990 /*
 3991	 * The selected active PHY index always refers to the post-swap order
 3992	 * (in case PHY swapping is enabled). So when swapping is enabled, we
 3993	 * need to reverse the configuration
3994 */
3995
3996 if (bp->link_params.multi_phy_config &
3997 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3998 if (sel_phy_idx == EXT_PHY1)
3999 sel_phy_idx = EXT_PHY2;
4000 else if (sel_phy_idx == EXT_PHY2)
4001 sel_phy_idx = EXT_PHY1;
4002 }
4003 return LINK_CONFIG_IDX(sel_phy_idx);
4004}
4005
55c11941 4006#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4007int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4008{
4009 struct bnx2x *bp = netdev_priv(dev);
4010 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4011
4012 switch (type) {
4013 case NETDEV_FCOE_WWNN:
4014 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4015 cp->fcoe_wwn_node_name_lo);
4016 break;
4017 case NETDEV_FCOE_WWPN:
4018 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4019 cp->fcoe_wwn_port_name_lo);
4020 break;
4021 default:
51c1a580 4022 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4023 return -EINVAL;
4024 }
4025
4026 return 0;
4027}
4028#endif
4029
9f6c9258
DK
4030/* called with rtnl_lock */
4031int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4032{
4033 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4034
4035 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4036 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4037 return -EAGAIN;
4038 }
4039
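	/* The upper bound is the driver's jumbo-frame ceiling; the lower bound
	 * keeps the frame, including the Ethernet header, at no less than the
	 * minimum Ethernet packet size.
	 */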
4040 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4041 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4042 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4043 return -EINVAL;
51c1a580 4044 }
9f6c9258
DK
4045
4046 /* This does not race with packet allocation
4047 * because the actual alloc size is
4048 * only updated as part of load
4049 */
4050 dev->mtu = new_mtu;
4051
66371c44
MM
4052 return bnx2x_reload_if_running(dev);
4053}
4054
c8f44aff 4055netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4056 netdev_features_t features)
66371c44
MM
4057{
4058 struct bnx2x *bp = netdev_priv(dev);
4059
4060 /* TPA requires Rx CSUM offloading */
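	/* (TPA aggregation relies on hardware-validated checksums, so without
	 * Rx checksum offload, or with TPA administratively disabled, neither
	 * LRO nor HW GRO can be offered)
	 */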
621b4d66 4061 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4062 features &= ~NETIF_F_LRO;
621b4d66
DK
4063 features &= ~NETIF_F_GRO;
4064 }
66371c44
MM
4065
4066 return features;
4067}
4068
c8f44aff 4069int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4070{
4071 struct bnx2x *bp = netdev_priv(dev);
4072 u32 flags = bp->flags;
538dd2e3 4073 bool bnx2x_reload = false;
66371c44
MM
4074
4075 if (features & NETIF_F_LRO)
4076 flags |= TPA_ENABLE_FLAG;
4077 else
4078 flags &= ~TPA_ENABLE_FLAG;
4079
621b4d66
DK
4080 if (features & NETIF_F_GRO)
4081 flags |= GRO_ENABLE_FLAG;
4082 else
4083 flags &= ~GRO_ENABLE_FLAG;
4084
538dd2e3
MB
4085 if (features & NETIF_F_LOOPBACK) {
4086 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4087 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4088 bnx2x_reload = true;
4089 }
4090 } else {
4091 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4092 bp->link_params.loopback_mode = LOOPBACK_NONE;
4093 bnx2x_reload = true;
4094 }
4095 }
4096
66371c44
MM
4097 if (flags ^ bp->flags) {
4098 bp->flags = flags;
538dd2e3
MB
4099 bnx2x_reload = true;
4100 }
66371c44 4101
538dd2e3 4102 if (bnx2x_reload) {
66371c44
MM
4103 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4104 return bnx2x_reload_if_running(dev);
4105 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4106 }
4107
66371c44 4108 return 0;
9f6c9258
DK
4109}
4110
4111void bnx2x_tx_timeout(struct net_device *dev)
4112{
4113 struct bnx2x *bp = netdev_priv(dev);
4114
4115#ifdef BNX2X_STOP_ON_ERROR
4116 if (!bp->panic)
4117 bnx2x_panic();
4118#endif
7be08a72
AE
4119
4120 smp_mb__before_clear_bit();
4121 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4122 smp_mb__after_clear_bit();
4123
9f6c9258 4124	/* This allows the netif to be shut down gracefully before resetting */
7be08a72 4125 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4126}
4127
9f6c9258
DK
4128int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4129{
4130 struct net_device *dev = pci_get_drvdata(pdev);
4131 struct bnx2x *bp;
4132
4133 if (!dev) {
4134 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4135 return -ENODEV;
4136 }
4137 bp = netdev_priv(dev);
4138
4139 rtnl_lock();
4140
4141 pci_save_state(pdev);
4142
4143 if (!netif_running(dev)) {
4144 rtnl_unlock();
4145 return 0;
4146 }
4147
4148 netif_device_detach(dev);
4149
5d07d868 4150 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4151
4152 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4153
4154 rtnl_unlock();
4155
4156 return 0;
4157}
4158
4159int bnx2x_resume(struct pci_dev *pdev)
4160{
4161 struct net_device *dev = pci_get_drvdata(pdev);
4162 struct bnx2x *bp;
4163 int rc;
4164
4165 if (!dev) {
4166 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4167 return -ENODEV;
4168 }
4169 bp = netdev_priv(dev);
4170
4171 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4172 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4173 return -EAGAIN;
4174 }
4175
4176 rtnl_lock();
4177
4178 pci_restore_state(pdev);
4179
4180 if (!netif_running(dev)) {
4181 rtnl_unlock();
4182 return 0;
4183 }
4184
4185 bnx2x_set_power_state(bp, PCI_D0);
4186 netif_device_attach(dev);
4187
4188 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4189
4190 rtnl_unlock();
4191
4192 return rc;
4193}
619c5cb6
VZ
4194
4195
4196void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4197 u32 cid)
4198{
4199 /* ustorm cxt validation */
4200 cxt->ustorm_ag_context.cdu_usage =
4201 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4202 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4203 /* xcontext validation */
4204 cxt->xstorm_ag_context.cdu_reserved =
4205 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4206 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4207}
4208
1191cb83
ED
4209static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4210 u8 fw_sb_id, u8 sb_index,
4211 u8 ticks)
619c5cb6
VZ
4212{
4213
4214 u32 addr = BAR_CSTRORM_INTMEM +
4215 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4216 REG_WR8(bp, addr, ticks);
51c1a580
MS
4217 DP(NETIF_MSG_IFUP,
4218 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4219 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4220}
4221
1191cb83
ED
4222static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4223 u16 fw_sb_id, u8 sb_index,
4224 u8 disable)
619c5cb6
VZ
4225{
4226 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4227 u32 addr = BAR_CSTRORM_INTMEM +
4228 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4229 u16 flags = REG_RD16(bp, addr);
4230 /* clear and set */
4231 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4232 flags |= enable_flag;
4233 REG_WR16(bp, addr, flags);
51c1a580
MS
4234 DP(NETIF_MSG_IFUP,
4235 "port %x fw_sb_id %d sb_index %d disable %d\n",
4236 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4237}
4238
4239void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4240 u8 sb_index, u8 disable, u16 usec)
4241{
4242 int port = BP_PORT(bp);
4243 u8 ticks = usec / BNX2X_BTR;
4244
4245 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4246
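	/* a zero usec timeout also disables coalescing for this index */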
4247 disable = disable ? 1 : (usec ? 0 : 1);
4248 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4249}