netpoll: fix incorrect access to skb data in __netpoll_rx
[linux-2.6-block.git] / drivers / net / bnx2x / bnx2x_cmn.c
CommitLineData
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
5de92408 3 * Copyright (c) 2007-2011 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
9f6c9258 18#include <linux/etherdevice.h>
9bcc0893 19#include <linux/if_vlan.h>
a6b7a407 20#include <linux/interrupt.h>
9f6c9258 21#include <linux/ip.h>
f2e0899f 22#include <net/ipv6.h>
7f3e01fe 23#include <net/ip6_checksum.h>
6891dd25 24#include <linux/firmware.h>
c0cba59e 25#include <linux/prefetch.h>
9f6c9258 26#include "bnx2x_cmn.h"
523224a3 27#include "bnx2x_init.h"
042181f5 28#include "bnx2x_sp.h"
523224a3 29
619c5cb6 30
9f6c9258 31
b3b83c3f
DK
32/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
38 * Makes sure the contents of the bp->fp[index].napi is kept
39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
6383c0b3
AE
50
51 fp->bp = bp;
52 fp->index = index;
53 if (IS_ETH_FP(fp))
54 fp->max_cos = bp->max_cos;
55 else
56 /* Special queues support only one CoS */
57 fp->max_cos = 1;
58
59 /*
60 * set the tpa flag for each queue. The tpa flag determines the queue
61 * minimal size so it must be set prior to queue memory allocation
62 */
63 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
64
65#ifdef BCM_CNIC
45d3539a
VZ
66 /* We don't want TPA on an FCoE L2 ring */
67 if (IS_FCOE_FP(fp))
68 fp->disable_tpa = 1;
6383c0b3 69#endif
b3b83c3f
DK
70}
71
72/**
73 * bnx2x_move_fp - move content of the fastpath structure.
74 *
75 * @bp: driver handle
76 * @from: source FP index
77 * @to: destination FP index
78 *
79 * Makes sure the contents of the bp->fp[to].napi is kept
80 * intact.
81 */
82static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
83{
84 struct bnx2x_fastpath *from_fp = &bp->fp[from];
85 struct bnx2x_fastpath *to_fp = &bp->fp[to];
86 struct napi_struct orig_napi = to_fp->napi;
87 /* Move bnx2x_fastpath contents */
88 memcpy(to_fp, from_fp, sizeof(*to_fp));
89 to_fp->index = to;
90
91 /* Restore the NAPI object as it has been already initialized */
92 to_fp->napi = orig_napi;
93}
94
/* NOTE(review): module-global, shared by all functions on a path; the
 * inline comment gives the index meaning. Readers/writers of this table
 * are not visible in this chunk - verify locking at the call sites.
 */
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
9f6c9258
DK
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of TX buffer descriptors belonging to one packet:
 * unmaps the start BD, skips the parse BD (and the TSO split-header BD,
 * when present, since neither carries a DMA mapping), unmaps every
 * fragment BD, and finally releases the skb and clears the sw ring slot.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	/* nbd counts the BDs after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* consumer index after the whole packet's BDs are reclaimed */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
164
/* Reclaim completed TX packets for one txdata ring and, if the netdev
 * queue was stopped for lack of BDs, re-wake it once enough room exists.
 */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	/* free every packet between the sw consumer and the hw consumer
	 * reported in the status block
	 */
	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      " pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
		sw_cons++;
	}

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		/* re-check under the lock before waking the queue */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
228
229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
230 u16 idx)
231{
232 u16 last_max = fp->last_max_sge;
233
234 if (SUB_S16(idx, last_max) > 0)
235 fp->last_max_sge = idx;
236}
237
/* Account the SGE pages consumed by one fast-path CQE and advance the
 * SGE producer over every fully-consumed mask element, clearing the
 * "used" bits for the pages the firmware has filled.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE entries consumed by the part of the packet that
	 * did not fit on the first BD
	 */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod: walk mask elements from the current
	 * producer; an all-zero element means every SGE in it was
	 * consumed, so re-arm it and advance the producer past it.
	 */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
292
/* Begin a TPA aggregation on the given bin: swap the pre-allocated spare
 * skb into the producer slot and park the just-received (partial) skb in
 * the bin until TPA_STOP completes the aggregation.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty skb from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_skb(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty skb from pool to prod */
	prod_rx_buf->skb = first_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
354
e4e3c02a
VZ
355/* Timestamp option length allowed for TPA aggregation:
356 *
357 * nop nop kind length echo val
358 */
359#define TPA_TSTAMP_OPT_LEN 12
360/**
e8920674 361 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 362 *
e8920674
DK
363 * @bp: driver handle
364 * @parsing_flags: parsing flags from the START CQE
365 * @len_on_bd: total length of the first packet for the
366 * aggregation.
367 *
368 * Approximate value of the MSS for this aggregation calculated using
369 * the first packet of it.
e4e3c02a
VZ
370 */
371static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
372 u16 len_on_bd)
373{
619c5cb6
VZ
374 /*
375 * TPA arrgregation won't have either IP options or TCP options
376 * other than timestamp or IPv6 extension headers.
e4e3c02a 377 */
619c5cb6
VZ
378 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
379
380 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
381 PRS_FLAG_OVERETH_IPV6)
382 hdrs_len += sizeof(struct ipv6hdr);
383 else /* IPv4 */
384 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
385
386
387 /* Check if there was a TCP timestamp, if there is it's will
388 * always be 12 bytes length: nop nop kind length echo val.
389 *
390 * Otherwise FW would close the aggregation.
391 */
392 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
393 hdrs_len += TPA_TSTAMP_OPT_LEN;
394
395 return len_on_bd - hdrs_len;
396}
397
/* Attach the SGE pages listed in the END_AGG CQE to the aggregated skb
 * as page fragments, replacing each consumed ring page with a fresh one.
 * Returns 0 on success or a negative errno if a replacement page could
 * not be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	/* bytes of the aggregation that live in SGE pages (everything
	 * beyond what fit on the first BD)
	 */
	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		/* NOTE(review): truesize grows by the data length only,
		 * not by the size of the page(s) actually backing the
		 * frag - verify this matches the accounting intended for
		 * this kernel version.
		 */
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
463
/* Complete (or abort) a TPA aggregation: hand the aggregated skb to the
 * stack via GRO and refill the bin with a freshly allocated skb. On any
 * failure the packet is dropped but the bin keeps a usable buffer.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new skb */
	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		/* HW validated the checksum of the aggregated packet */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new skb in bin */
		rx_buf->skb = new_skb;

		return;
	}

drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
537
538/* Set Toeplitz hash value in the skb using the value from the
539 * CQE (calculated by HW).
540 */
541static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
542 struct sk_buff *skb)
543{
544 /* Set Toeplitz hash from CQE */
545 if ((bp->dev->features & NETIF_F_RXHASH) &&
546 (cqe->fast_path_cqe.status_flags &
547 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
548 skb->rxhash =
549 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
550}
551
/* NAPI RX handler for one fastpath: drains up to @budget completions
 * from the RCQ, dispatching slow-path events, TPA start/stop CQEs and
 * regular packets, then publishes the new producers to the chip.
 * Returns the number of regular packets processed.
 */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);

			/* TPA start/stop CQEs are handled separately */
			if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
				/* sanity check */
				if (fp->disable_tpa &&
				    (CQE_TYPE_START(cqe_fp_type) ||
				     CQE_TYPE_STOP(cqe_fp_type)))
					BNX2X_ERR("START/STOP packet while "
						  "disable_tpa type %x\n",
						  CQE_TYPE(cqe_fp_type));
#endif

				if (CQE_TYPE_START(cqe_fp_type)) {
					u16 queue = cqe_fp->queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod,
							cqe_fp);

					/* Set Toeplitz hash for LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;

				} else {
					u16 queue =
						cqe->end_agg_cqe.queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					bnx2x_tpa_stop(bp, fp, queue,
						       &cqe->end_agg_cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp, cqe_fp);
					goto next_cqe;
				}
			}
			/* non TPA */
			len = le16_to_cpu(cqe_fp->pkt_len);
			pad = cqe_fp->placement_offset;
			/* sync only the header area for the copy-break
			 * decision below; full unmap happens later
			 */
			dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev, len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a none-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->dev->features & NETIF_F_RXCSUM) {

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
794
/* Per-fastpath MSI-X interrupt handler: acknowledges the status block
 * with interrupts disabled, prefetches the consumer indices the NAPI
 * poll will need, and schedules NAPI for this fastpath.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	/* one TX consumer per class of service */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
822
9f6c9258
DK
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* Lock order: sw mutex first, then the MDIO HW lock (when the
	 * board needs it). bnx2x_release_phy_lock() releases in reverse.
	 */
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
831
/* Release the PHY lock pair in reverse order of acquisition:
 * MDIO HW lock first (when held), then the sw mutex.
 */
void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
839
0793f83f
DK
840/* calculates MF speed according to current linespeed and MF configuration */
841u16 bnx2x_get_mf_speed(struct bnx2x *bp)
842{
843 u16 line_speed = bp->link_vars.line_speed;
844 if (IS_MF(bp)) {
faa6fcbb
DK
845 u16 maxCfg = bnx2x_extract_max_cfg(bp,
846 bp->mf_config[BP_VN(bp)]);
847
848 /* Calculate the current MAX line speed limit for the MF
849 * devices
0793f83f 850 */
faa6fcbb
DK
851 if (IS_MF_SI(bp))
852 line_speed = (line_speed * maxCfg) / 100;
853 else { /* SD mode */
0793f83f
DK
854 u16 vn_max_rate = maxCfg * 100;
855
856 if (vn_max_rate < line_speed)
857 line_speed = vn_max_rate;
faa6fcbb 858 }
0793f83f
DK
859 }
860
861 return line_speed;
862}
863
2ae17f66
VZ
864/**
865 * bnx2x_fill_report_data - fill link report data to report
866 *
867 * @bp: driver handle
868 * @data: link state to update
869 *
870 * It uses a none-atomic bit operations because is called under the mutex.
871 */
872static inline void bnx2x_fill_report_data(struct bnx2x *bp,
873 struct bnx2x_link_report_data *data)
874{
875 u16 line_speed = bnx2x_get_mf_speed(bp);
876
877 memset(data, 0, sizeof(*data));
878
879 /* Fill the report data: efective line speed */
880 data->line_speed = line_speed;
881
882 /* Link is down */
883 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
884 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
885 &data->link_report_flags);
886
887 /* Full DUPLEX */
888 if (bp->link_vars.duplex == DUPLEX_FULL)
889 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
890
891 /* Rx Flow Control is ON */
892 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
893 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
894
895 /* Tx Flow Control is ON */
896 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
897 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
898}
899
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	/* phy lock guarantees the report is consistent with link state */
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
9f6c9258 916
2ae17f66
VZ
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		netif_carrier_on(bp->dev);
		/* build the "Link is Up" line with pr_cont continuations */
		netdev_info(bp->dev, "NIC Link is Up, ");
		pr_cont("%d Mbps ", cur_data.line_speed);

		/* test_and_clear so only the FC bits can remain set below */
		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				pr_cont(", receive ");
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	}
}
986
/* Allocate TPA pools and SGE rings for every RX queue (disabling TPA on
 * a queue if its allocations fail), then activate each queue's BD ring
 * by publishing the producers to the chip.
 */
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < max_agg_queues; i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->skb = netdev_alloc_skb(bp->dev,
						       fp->rx_buf_size);
				if (!first_buf->skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					/* free the i skbs allocated so far */
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		/* E1-only memory workaround, programmed for queue 0 */
		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
f85582f8 1082
9f6c9258
DK
1083static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1084{
1085 int i;
6383c0b3 1086 u8 cos;
9f6c9258 1087
ec6ba945 1088 for_each_tx_queue(bp, i) {
9f6c9258 1089 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3
AE
1090 for_each_cos_in_tx_queue(fp, cos) {
1091 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
9f6c9258 1092
6383c0b3
AE
1093 u16 bd_cons = txdata->tx_bd_cons;
1094 u16 sw_prod = txdata->tx_pkt_prod;
1095 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1096
6383c0b3
AE
1097 while (sw_cons != sw_prod) {
1098 bd_cons = bnx2x_free_tx_pkt(bp, txdata,
1099 TX_BD(sw_cons));
1100 sw_cons++;
1101 }
9f6c9258
DK
1102 }
1103 }
1104}
1105
b3b83c3f
DK
1106static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1107{
1108 struct bnx2x *bp = fp->bp;
1109 int i;
1110
1111 /* ring wasn't allocated */
1112 if (fp->rx_buf_ring == NULL)
1113 return;
1114
1115 for (i = 0; i < NUM_RX_BD; i++) {
1116 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1117 struct sk_buff *skb = rx_buf->skb;
1118
1119 if (skb == NULL)
1120 continue;
b3b83c3f
DK
1121 dma_unmap_single(&bp->pdev->dev,
1122 dma_unmap_addr(rx_buf, mapping),
1123 fp->rx_buf_size, DMA_FROM_DEVICE);
1124
1125 rx_buf->skb = NULL;
1126 dev_kfree_skb(skb);
1127 }
1128}
1129
9f6c9258
DK
1130static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1131{
b3b83c3f 1132 int j;
9f6c9258 1133
ec6ba945 1134 for_each_rx_queue(bp, j) {
9f6c9258
DK
1135 struct bnx2x_fastpath *fp = &bp->fp[j];
1136
b3b83c3f 1137 bnx2x_free_rx_bds(fp);
9f6c9258 1138
9f6c9258
DK
1139 if (!fp->disable_tpa)
1140 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1141 ETH_MAX_AGGREGATION_QUEUES_E1 :
619c5cb6 1142 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
9f6c9258
DK
1143 }
1144}
1145
/**
 * bnx2x_free_skbs - release every skb the driver still holds, Tx then Rx.
 *
 * @bp:	driver handle
 */
void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
1151
e3835b99
DK
1152void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1153{
1154 /* load old values */
1155 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1156
1157 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1158 /* leave all but MAX value */
1159 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1160
1161 /* set new MAX value */
1162 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1163 & FUNC_MF_CFG_MAX_BW_MASK;
1164
1165 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1166 }
1167}
1168
ca92429f
DK
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 *
 * Vector layout mirrors bnx2x_enable_msix(): slot 0 is the slowpath vector,
 * an optional CNIC slot follows (BCM_CNIC builds), then one vector per ETH
 * queue.  Stops as soon as @nvecs vectors have been accounted for, so it can
 * unwind a partially completed bnx2x_req_msix_irqs().
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	/* slowpath vector (slot 0) */
	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	/* CNIC slot: accounted for but not freed here — its IRQ is
	 * managed by the CNIC driver, not requested by this one.
	 */
	if (nvecs == offset)
		return;
	offset++;
#endif

	/* one vector per ETH fastpath queue */
	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
1200
d6214d7a 1201void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1202{
d6214d7a 1203 if (bp->flags & USING_MSIX_FLAG)
ca92429f 1204 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
6383c0b3 1205 CNIC_PRESENT + 1);
d6214d7a
DK
1206 else if (bp->flags & USING_MSI_FLAG)
1207 free_irq(bp->pdev->irq, bp->dev);
1208 else
9f6c9258
DK
1209 free_irq(bp->pdev->irq, bp->dev);
1210}
1211
d6214d7a 1212int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1213{
d6214d7a 1214 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1215
d6214d7a
DK
1216 bp->msix_table[msix_vec].entry = msix_vec;
1217 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1218 bp->msix_table[0].entry);
1219 msix_vec++;
9f6c9258
DK
1220
1221#ifdef BCM_CNIC
d6214d7a
DK
1222 bp->msix_table[msix_vec].entry = msix_vec;
1223 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1224 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1225 msix_vec++;
9f6c9258 1226#endif
6383c0b3 1227 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1228 for_each_eth_queue(bp, i) {
d6214d7a 1229 bp->msix_table[msix_vec].entry = msix_vec;
9f6c9258 1230 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
d6214d7a
DK
1231 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1232 msix_vec++;
9f6c9258
DK
1233 }
1234
6383c0b3 1235 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
d6214d7a
DK
1236
1237 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1238
1239 /*
1240 * reconfigure number of tx/rx queues according to available
1241 * MSI-X vectors
1242 */
1243 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
d6214d7a
DK
1244 /* how less vectors we will have? */
1245 int diff = req_cnt - rc;
9f6c9258
DK
1246
1247 DP(NETIF_MSG_IFUP,
1248 "Trying to use less MSI-X vectors: %d\n", rc);
1249
1250 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1251
1252 if (rc) {
1253 DP(NETIF_MSG_IFUP,
1254 "MSI-X is not attainable rc %d\n", rc);
1255 return rc;
1256 }
d6214d7a
DK
1257 /*
1258 * decrease number of queues by number of unallocated entries
1259 */
1260 bp->num_queues -= diff;
9f6c9258
DK
1261
1262 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1263 bp->num_queues);
1264 } else if (rc) {
d6214d7a
DK
1265 /* fall to INTx if not enough memory */
1266 if (rc == -ENOMEM)
1267 bp->flags |= DISABLE_MSI_FLAG;
9f6c9258
DK
1268 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1269 return rc;
1270 }
1271
1272 bp->flags |= USING_MSIX_FLAG;
1273
1274 return 0;
1275}
1276
/**
 * bnx2x_req_msix_irqs - request the MSI-X vectors enabled earlier.
 *
 * @bp:	driver handle
 *
 * Requests the slowpath IRQ (slot 0), skips the CNIC slot (requested by the
 * CNIC driver itself), then one IRQ per ETH queue.  On any failure, already
 * requested vectors are released via bnx2x_free_msix_irqs(bp, offset).
 *
 * Return: 0 on success, -EBUSY on any request_irq() failure.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* slot 0: slowpath interrupt, bound to the netdev */
	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	/* CNIC slot is requested by the CNIC driver — just skip it */
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		/* per-queue IRQ name, e.g. "eth0-fp-3" */
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			/* unwind the vectors requested so far */
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	/* Log the vector assignments: sp + first/last fastpath */
	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
1319
d6214d7a 1320int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1321{
1322 int rc;
1323
1324 rc = pci_enable_msi(bp->pdev);
1325 if (rc) {
1326 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1327 return -1;
1328 }
1329 bp->flags |= USING_MSI_FLAG;
1330
1331 return 0;
1332}
1333
1334static int bnx2x_req_irq(struct bnx2x *bp)
1335{
1336 unsigned long flags;
1337 int rc;
1338
1339 if (bp->flags & USING_MSI_FLAG)
1340 flags = 0;
1341 else
1342 flags = IRQF_SHARED;
1343
1344 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1345 bp->dev->name, bp->dev);
9f6c9258
DK
1346 return rc;
1347}
1348
619c5cb6
VZ
1349static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1350{
1351 int rc = 0;
1352 if (bp->flags & USING_MSIX_FLAG) {
1353 rc = bnx2x_req_msix_irqs(bp);
1354 if (rc)
1355 return rc;
1356 } else {
1357 bnx2x_ack_int(bp);
1358 rc = bnx2x_req_irq(bp);
1359 if (rc) {
1360 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1361 return rc;
1362 }
1363 if (bp->flags & USING_MSI_FLAG) {
1364 bp->dev->irq = bp->pdev->irq;
1365 netdev_info(bp->dev, "using MSI IRQ %d\n",
1366 bp->pdev->irq);
1367 }
1368 }
1369
1370 return 0;
1371}
1372
1373static inline void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1374{
1375 int i;
1376
619c5cb6 1377 for_each_rx_queue(bp, i)
9f6c9258
DK
1378 napi_enable(&bnx2x_fp(bp, i, napi));
1379}
1380
619c5cb6 1381static inline void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1382{
1383 int i;
1384
619c5cb6 1385 for_each_rx_queue(bp, i)
9f6c9258
DK
1386 napi_disable(&bnx2x_fp(bp, i, napi));
1387}
1388
1389void bnx2x_netif_start(struct bnx2x *bp)
1390{
4b7ed897
DK
1391 if (netif_running(bp->dev)) {
1392 bnx2x_napi_enable(bp);
1393 bnx2x_int_enable(bp);
1394 if (bp->state == BNX2X_STATE_OPEN)
1395 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1396 }
1397}
1398
/**
 * bnx2x_netif_stop - quiesce the network interface.
 *
 * @bp:		driver handle
 * @disable_hw:	non-zero to also disable interrupts at the HW level
 *
 * Interrupts are disabled (and synchronized) before NAPI so no new polls
 * are scheduled while NAPI is being torn down.
 */
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
9f6c9258 1404
8307fa3e
VZ
/**
 * bnx2x_select_queue - ndo_select_queue: pick a Tx queue for an skb.
 *
 * @dev:	net device
 * @skb:	packet being transmitted
 *
 * FCoE/FIP frames (when FCoE is supported) are steered to the dedicated
 * FCoE Tx ring; everything else is hashed onto the ETH L2 queues.
 *
 * NOTE(review): the ethertype is read straight from skb->data, which
 * assumes the Ethernet (and possibly VLAN) header is present in the linear
 * area — presumably guaranteed on this Tx path, but worth confirming for
 * all callers (cf. similar skb-data access fixes elsewhere).
 */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
1430
d6214d7a
DK
1431void bnx2x_set_num_queues(struct bnx2x *bp)
1432{
1433 switch (bp->multi_mode) {
1434 case ETH_RSS_MODE_DISABLED:
9f6c9258 1435 bp->num_queues = 1;
d6214d7a
DK
1436 break;
1437 case ETH_RSS_MODE_REGULAR:
1438 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1439 break;
f85582f8 1440
9f6c9258 1441 default:
d6214d7a 1442 bp->num_queues = 1;
9f6c9258
DK
1443 break;
1444 }
ec6ba945
VZ
1445
1446 /* Add special queues */
6383c0b3 1447 bp->num_queues += NON_ETH_CONTEXT_USE;
ec6ba945
VZ
1448}
1449
cdb9d6ae
VZ
1450/**
1451 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1452 *
1453 * @bp: Driver handle
1454 *
1455 * We currently support for at most 16 Tx queues for each CoS thus we will
1456 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1457 * bp->max_cos.
1458 *
1459 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1460 * index after all ETH L2 indices.
1461 *
1462 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1463 * will be the holes at the end of each group of 16 ETh L2 indices (0..15,
1464 * 16..31,...) with indicies that are not coupled with any real Tx queue.
1465 *
1466 * The proper configuration of skb->queue_mapping is handled by
1467 * bnx2x_select_queue() and __skb_tx_hash().
1468 *
1469 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1470 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1471 */
ec6ba945
VZ
1472static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1473{
6383c0b3 1474 int rc, tx, rx;
ec6ba945 1475
6383c0b3
AE
1476 tx = MAX_TXQS_PER_COS * bp->max_cos;
1477 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1478
6383c0b3
AE
1479/* account for fcoe queue */
1480#ifdef BCM_CNIC
1481 if (!NO_FCOE(bp)) {
1482 rx += FCOE_PRESENT;
1483 tx += FCOE_PRESENT;
1484 }
ec6ba945 1485#endif
6383c0b3
AE
1486
1487 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1488 if (rc) {
1489 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1490 return rc;
1491 }
1492 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1493 if (rc) {
1494 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1495 return rc;
1496 }
1497
1498 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1499 tx, rx);
1500
ec6ba945
VZ
1501 return rc;
1502}
1503
a8c94b91
VZ
1504static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1505{
1506 int i;
1507
1508 for_each_queue(bp, i) {
1509 struct bnx2x_fastpath *fp = &bp->fp[i];
1510
1511 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1512 if (IS_FCOE_IDX(i))
1513 /*
1514 * Although there are no IP frames expected to arrive to
1515 * this ring we still want to add an
1516 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1517 * overrun attack.
1518 */
1519 fp->rx_buf_size =
1520 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
619c5cb6 1521 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1522 else
1523 fp->rx_buf_size =
619c5cb6
VZ
1524 bp->dev->mtu + ETH_OVREHEAD +
1525 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1526 }
1527}
1528
619c5cb6
VZ
1529static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1530{
1531 int i;
1532 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1533 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1534
1535 /*
1536 * Prepare the inital contents fo the indirection table if RSS is
1537 * enabled
1538 */
1539 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1540 for (i = 0; i < sizeof(ind_table); i++)
1541 ind_table[i] =
1542 bp->fp->cl_id + (i % num_eth_queues);
1543 }
1544
1545 /*
1546 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1547 * per-port, so if explicit configuration is needed , do it only
1548 * for a PMF.
1549 *
1550 * For 57712 and newer on the other hand it's a per-function
1551 * configuration.
1552 */
1553 return bnx2x_config_rss_pf(bp, ind_table,
1554 bp->port.pmf || !CHIP_IS_E1x(bp));
1555}
1556
/**
 * bnx2x_config_rss_pf - send the RSS configuration ramrod.
 *
 * @bp:		driver handle
 * @ind_table:	indirection table to program (T_ETH_INDIRECTION_TABLE_SIZE)
 * @config_hash: when true, also generate and program random RSS keys
 *
 * Translates bp->multi_mode into ramrod RSS flags, copies the indirection
 * table, optionally seeds the hash keys, and issues a blocking
 * (RAMROD_COMP_WAIT) bnx2x_config_rss().
 *
 * Return: bnx2x_config_rss() result, or -EINVAL on an unknown multi_mode.
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = &bp->rss_conf_obj;

	/* wait for the ramrod to complete before returning */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* RSS mode */
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
		break;
	case ETH_RSS_MODE_REGULAR:
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		break;
	case ETH_RSS_MODE_VLAN_PRI:
		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_E1HOV_PRI:
		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_IP_DSCP:
		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
		break;
	default:
		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
		return -EINVAL;
	}

	/* If RSS is enabled */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		/* RSS configuration: hash on IPv4/IPv6 with and without TCP */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Hash bits */
		params.rss_result_mask = MULTI_MASK;

		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

		if (config_hash) {
			/* RSS keys: random per-configuration */
			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
				params.rss_key[i] = random32();

			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
		}
	}

	return bnx2x_config_rss(bp, &params);
}
1619
1620static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1621{
1622 struct bnx2x_func_state_params func_params = {0};
1623
1624 /* Prepare parameters for function state transitions */
1625 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1626
1627 func_params.f_obj = &bp->func_obj;
1628 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1629
1630 func_params.params.hw_init.load_phase = load_code;
1631
1632 return bnx2x_func_state_change(bp, &func_params);
1633}
1634
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 *
 * Performs a "dry" (RAMROD_DRV_CLR_ONLY) cleanup of the ETH MAC list, the
 * UC MAC list and the multicast object, so that no HW commands are issued
 * while tearing down.  Errors are logged but not propagated — this is a
 * best-effort teardown path.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
			  "object: %d\n", rc);

	/* ...and wait until all pending commands are cleared
	 * (CONT returns > 0 while commands remain, 0 when done,
	 * < 0 on error)
	 */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
1690
/* Error exit helper for bnx2x_nic_load(): in normal builds, mark the device
 * state as ERROR and jump to the given unwind label; in BNX2X_STOP_ON_ERROR
 * debug builds, panic the driver and bail out immediately instead of
 * unwinding (so the failing state is preserved for inspection).
 */
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif
1705
9f6c9258
DK
/* must be called with rtnl_lock */
/*
 * bnx2x_nic_load - full device bring-up sequence.
 *
 * @bp:		driver handle
 * @load_mode:	LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG
 *
 * Order matters throughout: ILT info before allocation, allocation before
 * real-num-queues, MCP LOAD_REQ before HW init, HW init before IRQ setup,
 * and so on.  On failure, unwinds via the load_errorN labels in reverse
 * order of setup (only compiled in when BNX2X_STOP_ON_ERROR is not set —
 * see LOAD_ERROR_EXIT).
 *
 * Return: 0 on success, negative errno on failure.
 */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);


	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overriden by a multi class queue discipline
	 * or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}

	} else {
		/* No MCP: derive the load type from the per-path
		 * driver-maintained load counters instead.
		 */
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	/* Advertise DCC support to the MCP (COMMON loads only, and only
	 * when the second shmem region exists).
	 */
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	/* Set up all client queues except the leading one */
	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error4);
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Apply a MAX bandwidth update deferred from before the load */
	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
1987
/* must be called with rtnl_lock */
/*
 * bnx2x_nic_unload - full device teardown sequence.
 *
 * @bp:			driver handle
 * @unload_mode:	UNLOAD_NORMAL / UNLOAD_CLOSE / UNLOAD_RECOVERY
 *
 * Stops Tx, notifies CNIC, cleans up the chip (or, in recovery mode, only
 * quiesces the driver side), frees all skbs/SGEs/memory and, as the last
 * driver on the device, handles parity-recovery bookkeeping.
 *
 * Return: 0 on success, -EINVAL when called while already CLOSED/ERROR
 * (parity recovery in progress — leadership is released instead).
 */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions to complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_HW, "Releasing a leadership...\n");

		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem so the MCP does not treat the
	 * stopped driver-pulse as a driver death.
	 */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Recovery: skip the full chip cleanup — the reset flow
		 * will take care of the HW.
		 */

		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once gloabl blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safly clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}


	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
f85582f8 2106
9f6c9258
DK
/**
 * bnx2x_set_power_state - move the device to the requested PCI power state.
 *
 * @bp:		driver handle
 * @state:	target state; only PCI_D0 and PCI_D3hot are supported
 *
 * Manipulates the PCI_PM_CTRL register of the PM capability directly.
 * Silently succeeds when the device exposes no PM capability, and skips
 * the D3hot transition when other clients still hold the device enabled
 * or on emulation/FPGA platforms.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state field and latch any pending PME status */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;		/* D3hot encoding in the state field */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
2158
9f6c9258
DK
2159/*
2160 * net_device service functions
2161 */
/**
 * bnx2x_poll - NAPI poll callback for one fastpath.
 *
 * @napi:	NAPI context embedded in the fastpath
 * @budget:	maximum number of Rx packets that may be processed
 *
 * Loops servicing Tx completions (one txdata per CoS) and Rx work until
 * either the Rx budget is consumed or no work remains, in which case
 * NAPI is completed and the IGU interrupt is re-enabled via the SB ack.
 *
 * Returns the number of Rx packets processed (<= @budget).
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* Tx completions first - each CoS has its own txdata ring */
		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
				bnx2x_tx_int(bp, &fp->txdata[cos]);


		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
2235
9f6c9258
DK
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * @txdata:  Tx ring the packet is being placed on
 * @tx_buf:  software Tx buffer descriptor of the packet
 * @tx_bd:   in/out - on entry the start BD, on exit the new data BD
 * @hlen:    header length in bytes kept in the first (headers) BD
 * @bd_prod: current BD producer index
 * @nbd:     total number of BDs for the packet (already incl. the split BD)
 *
 * Returns the advanced BD producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	/* the data BD reuses the headers' DMA mapping, offset past them */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
2285
2286static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2287{
2288 if (fix > 0)
2289 csum = (u16) ~csum_fold(csum_sub(csum,
2290 csum_partial(t_header - fix, fix, 0)));
2291
2292 else if (fix < 0)
2293 csum = (u16) ~csum_fold(csum_add(csum,
2294 csum_partial(t_header, -fix, 0)));
2295
2296 return swab16(csum);
2297}
2298
2299static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2300{
2301 u32 rc;
2302
2303 if (skb->ip_summed != CHECKSUM_PARTIAL)
2304 rc = XMIT_PLAIN;
2305
2306 else {
d0d9d8ef 2307 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2308 rc = XMIT_CSUM_V6;
2309 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2310 rc |= XMIT_CSUM_TCP;
2311
2312 } else {
2313 rc = XMIT_CSUM_V4;
2314 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2315 rc |= XMIT_CSUM_TCP;
2316 }
2317 }
2318
5892b9e9
VZ
2319 if (skb_is_gso_v6(skb))
2320 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2321 else if (skb_is_gso(skb))
2322 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2323
2324 return rc;
2325}
2326
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions)

   For LSO packets this slides a window of (MAX_FETCH_BD - 3) BDs over the
   fragment list and verifies every window carries at least one MSS of
   payload - a FW restriction. Returns non-zero when the skb must be
   linearized before transmission. */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* slide the window one fragment forward */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
2407
2297a2da
VZ
2408static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2409 u32 xmit_type)
f2e0899f 2410{
2297a2da
VZ
2411 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2412 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2413 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2414 if ((xmit_type & XMIT_GSO_V6) &&
2415 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2416 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2417}
2418
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 *
 * Fills the E1x parse BD with the LSO parameters: MSS, TCP sequence
 * number and flags, plus a pseudo-header checksum computed with a zero
 * length field, as required when PSEUDO_CS_WITHOUT_LEN is set.
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));

	} else
		/* IPv6 pseudo-header checksum, again with zero length */
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
f85582f8 2449
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @parsing_data: data to be updated
 * @xmit_type:	xmit flags
 *
 * 57712 related
 *
 * Returns the offset in bytes from skb->data of the end of the L4 header,
 * i.e. the total header length for the headers BD.
 *
 * NOTE(review): the return type is u8, so a header offset above 255 bytes
 * would silently truncate - confirm callers never build such headers.
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	/* start of the L4 header, expressed in 16-bit words */
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		/* TCP header length in 32-bit dwords */
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
			sizeof(struct udphdr) - skb->data;
}
2481
93ef5c02
DK
2482static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2483 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2484{
93ef5c02
DK
2485 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2486
2487 if (xmit_type & XMIT_CSUM_V4)
2488 tx_start_bd->bd_flags.as_bitfield |=
2489 ETH_TX_BD_FLAGS_IP_CSUM;
2490 else
2491 tx_start_bd->bd_flags.as_bitfield |=
2492 ETH_TX_BD_FLAGS_IPV6;
2493
2494 if (!(xmit_type & XMIT_CSUM_TCP))
2495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2496}
2497
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 *
 * Fills the E1x parse BD for checksum offload: header lengths (kept in
 * 16-bit words, as the firmware expects) and the L4 pseudo checksum,
 * applying the HW checksum fixup for the UDP case.
 *
 * Returns the total header length (Ethernet + IP + L4) in bytes.
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	/* hlen accumulates header sizes in 16-bit words */
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	/* convert back from words to bytes for the caller */
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
f85582f8 2552
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * Main transmit path: selects the fastpath/CoS ring from the skb queue
 * mapping, optionally linearizes over-fragmented skbs, DMA-maps the
 * linear part and every fragment, builds the BD chain (start BD, parse
 * BD, data BDs, optional TSO header/data split) and finally rings the
 * Tx doorbell. Returns NETDEV_TX_OK (the skb is consumed, possibly
 * dropped on mapping failure) or NETDEV_TX_BUSY when the ring is full.
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index, fp_index, txdata_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);

	/* decode the fastpath index and the cos index from the txq */
	fp_index = TXQ_TO_FP(txq_index);
	txdata_index = TXQ_TO_COS(txq_index);

#ifdef BCM_CNIC
	/*
	 * Override the above for the FCoE queue:
	 * - FCoE fp entry is right after the ETH entries.
	 * - FCoE L2 queue uses bp->txdata[0] only.
	 */
	if (unlikely(!NO_FCOE(bp) && (txq_index ==
				      bnx2x_fcoe_tx(bp, txq_index)))) {
		fp_index = FCOE_IDX;
		txdata_index = 0;
	}
#endif

	/* enable this debug print to view the transmission queue being used
	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
	   txq_index, fp_index, txdata_index); */

	/* locate the fastpath and the txdata */
	fp = &bp->fp[fp_index];
	txdata = &fp->txdata[txdata_index];

	/* enable this debug print to view the transmission details
	DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
	   " tx_data ptr %p fp pointer %p",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	/* worst case needs nr_frags data BDs + start BD + PBD + last BD */
	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
				"protocol(%x,%x) gso type %x xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
		   "silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		/* E2 and newer chips use the E2-format parse BD */
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		if (IS_MF_SI(bp)) {
			/*
			 * fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}
	} else {
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* split off the headers into their own BD if the linear
		 * part contains payload beyond the headers
		 */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset, frag->size,
				       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
						"dropping packet...\n");

			/* we need unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod));
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a none-LSO
	 * case, when we much more care about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
f85582f8 2908
6383c0b3
AE
2909/**
2910 * bnx2x_setup_tc - routine to configure net_device for multi tc
2911 *
2912 * @netdev: net device to configure
2913 * @tc: number of traffic classes to enable
2914 *
2915 * callback connected to the ndo_setup_tc function pointer
2916 */
2917int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2918{
2919 int cos, prio, count, offset;
2920 struct bnx2x *bp = netdev_priv(dev);
2921
2922 /* setup tc must be called under rtnl lock */
2923 ASSERT_RTNL();
2924
2925 /* no traffic classes requested. aborting */
2926 if (!num_tc) {
2927 netdev_reset_tc(dev);
2928 return 0;
2929 }
2930
2931 /* requested to support too many traffic classes */
2932 if (num_tc > bp->max_cos) {
2933 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2934 " requested: %d. max supported is %d",
2935 num_tc, bp->max_cos);
2936 return -EINVAL;
2937 }
2938
2939 /* declare amount of supported traffic classes */
2940 if (netdev_set_num_tc(dev, num_tc)) {
2941 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2942 num_tc);
2943 return -EINVAL;
2944 }
2945
2946 /* configure priority to traffic class mapping */
2947 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2948 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2949 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2950 prio, bp->prio_to_cos[prio]);
2951 }
2952
2953
2954 /* Use this configuration to diffrentiate tc0 from other COSes
2955 This can be used for ets or pfc, and save the effort of setting
2956 up a multio class queue disc or negotiating DCBX with a switch
2957 netdev_set_prio_tc_map(dev, 0, 0);
2958 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2959 for (prio = 1; prio < 16; prio++) {
2960 netdev_set_prio_tc_map(dev, prio, 1);
2961 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2962 } */
2963
2964 /* configure traffic class to transmission queue mapping */
2965 for (cos = 0; cos < bp->max_cos; cos++) {
2966 count = BNX2X_NUM_ETH_QUEUES(bp);
2967 offset = cos * MAX_TXQS_PER_COS;
2968 netdev_set_tc_queue(dev, cos, count, offset);
2969 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2970 cos, offset, count);
2971 }
2972
2973 return 0;
2974}
2975
9f6c9258
DK
2976/* called with rtnl_lock */
2977int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2978{
2979 struct sockaddr *addr = p;
2980 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 2981 int rc = 0;
9f6c9258
DK
2982
2983 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2984 return -EINVAL;
2985
619c5cb6
VZ
2986 if (netif_running(dev)) {
2987 rc = bnx2x_set_eth_mac(bp, false);
2988 if (rc)
2989 return rc;
2990 }
2991
9f6c9258 2992 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 2993
523224a3 2994 if (netif_running(dev))
619c5cb6 2995 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 2996
619c5cb6 2997 return rc;
9f6c9258
DK
2998}
2999
/* Free all DMA and ring memory owned by fastpath @fp_index: the status
 * block (skipped for the FCoE ring, which shares the default SB), the Rx
 * buffer/descriptor/completion/SGE rings, and the Tx rings of every CoS.
 */
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
#ifdef BCM_CNIC
	if (IS_FCOE_IDX(fp_index)) {
		/* FCoE uses the default SB - nothing was allocated here */
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;

	} else {
#endif
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif
	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP,
			   "freeing tx memory of fp %d cos %d cid %d",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
3068
3069void bnx2x_free_fp_mem(struct bnx2x *bp)
3070{
3071 int i;
3072 for_each_queue(bp, i)
3073 bnx2x_free_fp_mem_at(bp, i);
3074}
3075
3076static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3077{
3078 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3079 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3080 bnx2x_fp(bp, index, sb_index_values) =
3081 (__le16 *)status_blk.e2_sb->sb.index_values;
3082 bnx2x_fp(bp, index, sb_running_index) =
3083 (__le16 *)status_blk.e2_sb->sb.running_index;
3084 } else {
3085 bnx2x_fp(bp, index, sb_index_values) =
3086 (__le16 *)status_blk.e1x_sb->sb.index_values;
3087 bnx2x_fp(bp, index, sb_running_index) =
3088 (__le16 *)status_blk.e1x_sb->sb.running_index;
3089 }
3090}
3091
/* Allocate all host memory for one fastpath queue: status block,
 * per-CoS Tx rings and Rx rings (buffer/descriptor/completion/SGE).
 *
 * Returns 0 on success (possibly with a shrunken Rx ring) and -ENOMEM
 * when even the FW-mandated minimum ring size could not be allocated.
 *
 * NOTE(review): the BNX2X_ALLOC/BNX2X_PCI_ALLOC macros appear to jump
 * to the local alloc_mem_err label on failure - confirm in bnx2x_cmn.h.
 */
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;

	/* if rx_ring_size specified - use it */
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

	/* allocate at least number of buffers required by FW */
	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
			     MIN_RX_SIZE_TPA,
			     rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
	/* FCoE rides on the default SB - no private SB allocation */
	if (!IS_FCOE_IDX(index)) {
#endif
		/* status blocks: layout differs between E1x and E2+ chips */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
					&bnx2x_fp(bp, index, status_blk_mapping),
					sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
					&bnx2x_fp(bp, index, status_blk_mapping),
					sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc, one pair per CoS */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP, "allocating tx memory of "
					 "fp %d cos %d",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				    sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
					&txdata->tx_desc_mapping,
					sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs: may come back smaller than requested under
		 * memory pressure
		 */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
			 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
3200
/* Allocate fastpath memory for every queue.  Failure on the leading
 * (or FCoE) queue is fatal; failure on an RSS queue shrinks the number
 * of queues instead of failing the load.
 */
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
	 * 4. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;
#endif

	/* RSS: stop at the first queue that cannot be allocated */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures: i is the count of eth queues that
	 * did get their memory
	 */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp even NO_FCOE_FLAG is on */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
d6214d7a 3253
523224a3
DK
3254void bnx2x_free_mem_bp(struct bnx2x *bp)
3255{
3256 kfree(bp->fp);
3257 kfree(bp->msix_table);
3258 kfree(bp->ilt);
3259}
3260
3261int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3262{
3263 struct bnx2x_fastpath *fp;
3264 struct msix_entry *tbl;
3265 struct bnx2x_ilt *ilt;
6383c0b3
AE
3266 int msix_table_size = 0;
3267
3268 /*
3269 * The biggest MSI-X table we might need is as a maximum number of fast
3270 * path IGU SBs plus default SB (for PF).
3271 */
3272 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3273
6383c0b3
AE
3274 /* fp array: RSS plus CNIC related L2 queues */
3275 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3276 sizeof(*fp), GFP_KERNEL);
523224a3
DK
3277 if (!fp)
3278 goto alloc_err;
3279 bp->fp = fp;
3280
3281 /* msix table */
6383c0b3 3282 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3283 if (!tbl)
3284 goto alloc_err;
3285 bp->msix_table = tbl;
3286
3287 /* ilt */
3288 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3289 if (!ilt)
3290 goto alloc_err;
3291 bp->ilt = ilt;
3292
3293 return 0;
3294alloc_err:
3295 bnx2x_free_mem_bp(bp);
3296 return -ENOMEM;
3297
3298}
3299
a9fccec7 3300int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3301{
3302 struct bnx2x *bp = netdev_priv(dev);
3303
3304 if (unlikely(!netif_running(dev)))
3305 return 0;
3306
3307 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3308 return bnx2x_nic_load(bp, LOAD_NORMAL);
3309}
3310
1ac9e428
YR
3311int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3312{
3313 u32 sel_phy_idx = 0;
3314 if (bp->link_params.num_phys <= 1)
3315 return INT_PHY;
3316
3317 if (bp->link_vars.link_up) {
3318 sel_phy_idx = EXT_PHY1;
3319 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3320 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3321 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3322 sel_phy_idx = EXT_PHY2;
3323 } else {
3324
3325 switch (bnx2x_phy_selection(&bp->link_params)) {
3326 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3327 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3328 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3329 sel_phy_idx = EXT_PHY1;
3330 break;
3331 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3332 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3333 sel_phy_idx = EXT_PHY2;
3334 break;
3335 }
3336 }
3337
3338 return sel_phy_idx;
3339
3340}
3341int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3342{
3343 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3344 /*
3345 * The selected actived PHY is always after swapping (in case PHY
3346 * swapping is enabled). So when swapping is enabled, we need to reverse
3347 * the configuration
3348 */
3349
3350 if (bp->link_params.multi_phy_config &
3351 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3352 if (sel_phy_idx == EXT_PHY1)
3353 sel_phy_idx = EXT_PHY2;
3354 else if (sel_phy_idx == EXT_PHY2)
3355 sel_phy_idx = EXT_PHY1;
3356 }
3357 return LINK_CONFIG_IDX(sel_phy_idx);
3358}
3359
bf61ee14
VZ
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/* Report the FCoE world-wide node/port name taken from the CNIC device */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (type == NETDEV_FCOE_WWNN) {
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		return 0;
	}

	if (type == NETDEV_FCOE_WWPN) {
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		return 0;
	}

	return -EINVAL;
}
#endif
3382
9f6c9258
DK
3383/* called with rtnl_lock */
3384int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3385{
3386 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
3387
3388 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3389 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3390 return -EAGAIN;
3391 }
3392
3393 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3394 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3395 return -EINVAL;
3396
3397 /* This does not race with packet allocation
3398 * because the actual alloc size is
3399 * only updated as part of load
3400 */
3401 dev->mtu = new_mtu;
3402
66371c44
MM
3403 return bnx2x_reload_if_running(dev);
3404}
3405
3406u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3407{
3408 struct bnx2x *bp = netdev_priv(dev);
3409
3410 /* TPA requires Rx CSUM offloading */
3411 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3412 features &= ~NETIF_F_LRO;
3413
3414 return features;
3415}
3416
3417int bnx2x_set_features(struct net_device *dev, u32 features)
3418{
3419 struct bnx2x *bp = netdev_priv(dev);
3420 u32 flags = bp->flags;
538dd2e3 3421 bool bnx2x_reload = false;
66371c44
MM
3422
3423 if (features & NETIF_F_LRO)
3424 flags |= TPA_ENABLE_FLAG;
3425 else
3426 flags &= ~TPA_ENABLE_FLAG;
3427
538dd2e3
MB
3428 if (features & NETIF_F_LOOPBACK) {
3429 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3430 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3431 bnx2x_reload = true;
3432 }
3433 } else {
3434 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3435 bp->link_params.loopback_mode = LOOPBACK_NONE;
3436 bnx2x_reload = true;
3437 }
3438 }
3439
66371c44
MM
3440 if (flags ^ bp->flags) {
3441 bp->flags = flags;
538dd2e3
MB
3442 bnx2x_reload = true;
3443 }
66371c44 3444
538dd2e3 3445 if (bnx2x_reload) {
66371c44
MM
3446 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3447 return bnx2x_reload_if_running(dev);
3448 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
3449 }
3450
66371c44 3451 return 0;
9f6c9258
DK
3452}
3453
/* netdev watchdog hook: a Tx queue stalled.  Schedule a reset from the
 * slowpath rtnl worker instead of resetting from this context.
 */
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* Barriers around set_bit() so the rtnl task is guaranteed to
	 * observe the TX_TIMEOUT bit once scheduled.
	 */
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
3470
9f6c9258
DK
3471int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3472{
3473 struct net_device *dev = pci_get_drvdata(pdev);
3474 struct bnx2x *bp;
3475
3476 if (!dev) {
3477 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3478 return -ENODEV;
3479 }
3480 bp = netdev_priv(dev);
3481
3482 rtnl_lock();
3483
3484 pci_save_state(pdev);
3485
3486 if (!netif_running(dev)) {
3487 rtnl_unlock();
3488 return 0;
3489 }
3490
3491 netif_device_detach(dev);
3492
3493 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3494
3495 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3496
3497 rtnl_unlock();
3498
3499 return 0;
3500}
3501
3502int bnx2x_resume(struct pci_dev *pdev)
3503{
3504 struct net_device *dev = pci_get_drvdata(pdev);
3505 struct bnx2x *bp;
3506 int rc;
3507
3508 if (!dev) {
3509 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3510 return -ENODEV;
3511 }
3512 bp = netdev_priv(dev);
3513
3514 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3515 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3516 return -EAGAIN;
3517 }
3518
3519 rtnl_lock();
3520
3521 pci_restore_state(pdev);
3522
3523 if (!netif_running(dev)) {
3524 rtnl_unlock();
3525 return 0;
3526 }
3527
3528 bnx2x_set_power_state(bp, PCI_D0);
3529 netif_device_attach(dev);
3530
f2e0899f
DK
3531 /* Since the chip was reset, clear the FW sequence number */
3532 bp->fw_seq = 0;
9f6c9258
DK
3533 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3534
3535 rtnl_unlock();
3536
3537 return rc;
3538}
619c5cb6
VZ
3539
3540
3541void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3542 u32 cid)
3543{
3544 /* ustorm cxt validation */
3545 cxt->ustorm_ag_context.cdu_usage =
3546 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3547 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3548 /* xcontext validation */
3549 cxt->xstorm_ag_context.cdu_reserved =
3550 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3551 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3552}
3553
/* Write the host-coalescing timeout (in BNX2X_BTR ticks) for one SB
 * index into CSTORM internal memory.
 */
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
3565
/* Set or clear the HC_ENABLED flag for one SB index in CSTORM internal
 * memory via a read-modify-write of the flags word.
 */
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}
3581
3582void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3583 u8 sb_index, u8 disable, u16 usec)
3584{
3585 int port = BP_PORT(bp);
3586 u8 ticks = usec / BNX2X_BTR;
3587
3588 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3589
3590 disable = disable ? 1 : (usec ? 0 : 1);
3591 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3592}