bnx2x: remove references to intr_sem
drivers/net/bnx2x/bnx2x_cmn.c (linux-2.6-block.git)
/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the content of bp->fp[index].napi is kept intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the content of bp->fp[to].napi is kept intact.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct napi_struct orig_napi = to_fp->napi;
	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Restore the NAPI object as it has been already initialized */
	to_fp->napi = orig_napi;
}

75/* free skb in the packet ring at pos idx
76 * return idx of last bd freed
77 */
78static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
79 u16 idx)
80{
81 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
82 struct eth_tx_start_bd *tx_start_bd;
83 struct eth_tx_bd *tx_data_bd;
84 struct sk_buff *skb = tx_buf->skb;
85 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
86 int nbd;
87
88 /* prefetch skb end pointer to speedup dev_kfree_skb() */
89 prefetch(&skb->end);
90
91 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
92 idx, tx_buf, skb);
93
94 /* unmap first bd */
95 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
96 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

100 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
101#ifdef BNX2X_STOP_ON_ERROR
102 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
103 BNX2X_ERR("BAD nbd!\n");
104 bnx2x_panic();
105 }
106#endif
107 new_cons = nbd + tx_buf->first_bd;
108
109 /* Get the next bd */
110 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
111
112 /* Skip a parse bd... */
113 --nbd;
114 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
115
116 /* ...and the TSO split header bd since they have no mapping */
117 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
118 --nbd;
119 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
120 }
121
122 /* now free frags */
123 while (nbd > 0) {
124
125 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
126 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
127 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
128 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
129 if (--nbd)
130 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
131 }
132
133 /* release skb */
134 WARN_ON(!skb);
	dev_kfree_skb_any(skb);
136 tx_buf->first_bd = 0;
137 tx_buf->skb = NULL;
138
139 return new_cons;
140}
141
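/* Process TX completions for a fastpath ring: walk the software consumer up
 * to the consumer index reported in the status block, free each completed
 * skb via bnx2x_free_tx_pkt(), then wake the netdev TX queue if it was
 * stopped and enough BDs have become free again.
 */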
142int bnx2x_tx_int(struct bnx2x_fastpath *fp)
143{
144 struct bnx2x *bp = fp->bp;
145 struct netdev_queue *txq;
146 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
147
148#ifdef BNX2X_STOP_ON_ERROR
149 if (unlikely(bp->panic))
150 return -1;
151#endif
152
153 txq = netdev_get_tx_queue(bp->dev, fp->index);
154 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
155 sw_cons = fp->tx_pkt_cons;
156
157 while (sw_cons != hw_cons) {
158 u16 pkt_cons;
159
160 pkt_cons = TX_BD(sw_cons);
161
		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
				      " pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

166 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
167 sw_cons++;
168 }
169
170 fp->tx_pkt_cons = sw_cons;
171 fp->tx_bd_cons = bd_cons;
172
173 /* Need to make the tx_bd_cons update visible to start_xmit()
174 * before checking for netif_tx_queue_stopped(). Without the
175 * memory barrier, there is a small possibility that
176 * start_xmit() will miss it and cause the queue to be stopped
177 * forever.
178 */
179 smp_mb();
180
181 if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
191
192 __netif_tx_lock(txq, smp_processor_id());
193
194 if ((netif_tx_queue_stopped(txq)) &&
195 (bp->state == BNX2X_STATE_OPEN) &&
196 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
197 netif_tx_wake_queue(txq);
198
199 __netif_tx_unlock(txq);
200 }
201 return 0;
202}
203
204static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
205 u16 idx)
206{
207 u16 last_max = fp->last_max_sge;
208
209 if (SUB_S16(idx, last_max) > 0)
210 fp->last_max_sge = idx;
211}
212
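/* Advance the SGE producer after a TPA aggregation: clear the mask bit of
 * every SGE consumed by this CQE, track the highest SGE index seen, and push
 * rx_sge_prod forward over fully consumed mask elements so their pages can
 * be re-posted to the chip.
 */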
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
267
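/* Start a TPA aggregation on the given queue (bin): move an empty skb from
 * the per-queue TPA pool to the producer slot and map it, park the partially
 * received skb from the consumer slot in the pool without unmapping it, and
 * mark the bin state as BNX2X_TPA_START.
 */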
268static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
269 struct sk_buff *skb, u16 cons, u16 prod)
270{
271 struct bnx2x *bp = fp->bp;
272 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
273 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
274 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
275 dma_addr_t mapping;
276
277 /* move empty skb from pool to prod and map it */
278 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
282
283 /* move partial skb from cons to pool (don't unmap yet) */
284 fp->tpa_pool[queue] = *cons_rx_buf;
285
286 /* mark bin state as start - print error if current state != stop */
287 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
288 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290 fp->tpa_state[queue] = BNX2X_TPA_START;
291
292 /* point prod_bd to new skb */
293 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
294 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
295
296#ifdef BNX2X_STOP_ON_ERROR
297 fp->tpa_queue_used |= (1 << queue);
298#ifdef _ASM_GENERIC_INT_L64_H
299 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
300#else
301 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
302#endif
303 fp->tpa_queue_used);
304#endif
305}
306
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/* TPA aggregation won't have IP options or TCP options
	 * other than timestamp.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
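
/* Illustrative sketch of the arithmetic above: with a 14-byte Ethernet header
 * and 20-byte IPv4/TCP headers, hdrs_len is 54 (or 66 with the 12-byte
 * timestamp option), so a first packet with len_on_bd == 1514 yields an
 * approximate MSS of 1460 (or 1448).
 */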
342
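/* Attach the SGE pages of a TPA aggregation to the skb: run through the SGL
 * reported by the CQE, replace each consumed page in the SGE ring with a
 * newly allocated one and add the old page as an skb fragment; gso_size is
 * set from bnx2x_set_lro_mss() so forwarded LRO packets can be re-segmented.
 */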
343static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344 struct sk_buff *skb,
345 struct eth_fast_path_rx_cqe *fp_cqe,
e4e3c02a 346 u16 cqe_idx, u16 parsing_flags)
9f6c9258
DK
347{
348 struct sw_rx_page *rx_pg, old_rx_pg;
349 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
350 u32 i, frag_len, frag_size, pages;
351 int err;
352 int j;
353
354 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
355 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
356
	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
							      len_on_bd);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
364 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
365 pages, cqe_idx);
366 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
367 fp_cqe->pkt_len, len_on_bd);
368 bnx2x_panic();
369 return -EINVAL;
370 }
371#endif
372
373 /* Run through the SGL and compose the fragmented skb */
374 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
523224a3
DK
375 u16 sge_idx =
376 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
377
378 /* FW gives the indices of the SGE as if the ring is an array
379 (meaning that "next" element will consume 2 indices) */
380 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
381 rx_pg = &fp->rx_page_ring[sge_idx];
382 old_rx_pg = *rx_pg;
383
384 /* If we fail to allocate a substitute page, we simply stop
385 where we are and drop the whole packet */
386 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
387 if (unlikely(err)) {
388 fp->eth_q_stats.rx_skb_alloc_failed++;
389 return err;
390 }
391
		/* Unmap the page as we are going to pass it to the stack */
393 dma_unmap_page(&bp->pdev->dev,
394 dma_unmap_addr(&old_rx_pg, mapping),
395 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
396
397 /* Add one frag and update the appropriate fields in the skb */
398 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
399
400 skb->data_len += frag_len;
401 skb->truesize += frag_len;
402 skb->len += frag_len;
403
404 frag_size -= frag_len;
405 }
406
407 return 0;
408}
409
410static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
411 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
412 u16 cqe_idx)
413{
414 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
415 struct sk_buff *skb = rx_buf->skb;
416 /* alloc new skb */
a8c94b91 417 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
9f6c9258
DK
418
419 /* Unmap skb in the pool anyway, as we are going to change
420 pool entry status to BNX2X_TPA_STOP even if new skb allocation
421 fails. */
422 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 423 fp->rx_buf_size, DMA_FROM_DEVICE);
9f6c9258
DK
424
425 if (likely(new_skb)) {
426 /* fix ip xsum and give it to the stack */
427 /* (no need to map the new skb) */
		u16 parsing_flags =
			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);

431 prefetch(skb);
217de5aa 432 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
9f6c9258
DK
433
434#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 435 if (pad + len > fp->rx_buf_size) {
9f6c9258
DK
436 BNX2X_ERR("skb_put is about to fail... "
437 "pad %d len %d rx_buf_size %d\n",
a8c94b91 438 pad, len, fp->rx_buf_size);
9f6c9258
DK
439 bnx2x_panic();
440 return;
441 }
442#endif
443
444 skb_reserve(skb, pad);
445 skb_put(skb, len);
446
447 skb->protocol = eth_type_trans(skb, bp->dev);
448 skb->ip_summed = CHECKSUM_UNNECESSARY;
449
450 {
451 struct iphdr *iph;
452
453 iph = (struct iphdr *)skb->data;
9f6c9258
DK
454 iph->check = 0;
455 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
456 }
457
458 if (!bnx2x_fill_frag_skb(bp, fp, skb,
e4e3c02a
VZ
459 &cqe->fast_path_cqe, cqe_idx,
460 parsing_flags)) {
			if (parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb,
					le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
			napi_gro_receive(&fp->napi, skb);
466 } else {
467 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
468 " - dropping packet!\n");
40955532 469 dev_kfree_skb_any(skb);
9f6c9258
DK
470 }
471
472
473 /* put new skb in bin */
474 fp->tpa_pool[queue].skb = new_skb;
475
476 } else {
477 /* else drop the packet and keep the buffer in the bin */
478 DP(NETIF_MSG_RX_STATUS,
479 "Failed to allocate new skb - dropping packet!\n");
480 fp->eth_q_stats.rx_skb_alloc_failed++;
481 }
482
483 fp->tpa_state[queue] = BNX2X_TPA_STOP;
484}
485
486/* Set Toeplitz hash value in the skb using the value from the
487 * CQE (calculated by HW).
488 */
489static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
490 struct sk_buff *skb)
491{
492 /* Set Toeplitz hash from CQE */
493 if ((bp->dev->features & NETIF_F_RXHASH) &&
494 (cqe->fast_path_cqe.status_flags &
495 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
496 skb->rxhash =
497 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
498}
499
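/* Process up to @budget RX completions for a fastpath ring: handle slowpath
 * CQEs, TPA start/stop events and regular packets (small frames are copied
 * when a jumbo MTU is configured), then update the BD, CQ and SGE producers
 * for the chip.
 */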
500int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
501{
502 struct bnx2x *bp = fp->bp;
503 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
504 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
505 int rx_pkt = 0;
506
507#ifdef BNX2X_STOP_ON_ERROR
508 if (unlikely(bp->panic))
509 return 0;
510#endif
511
512 /* CQ "next element" is of the size of the regular element,
513 that's why it's ok here */
514 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
515 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
516 hw_comp_cons++;
517
518 bd_cons = fp->rx_bd_cons;
519 bd_prod = fp->rx_bd_prod;
520 bd_prod_fw = bd_prod;
521 sw_comp_cons = fp->rx_comp_cons;
522 sw_comp_prod = fp->rx_comp_prod;
523
524 /* Memory barrier necessary as speculative reads of the rx
525 * buffer can be ahead of the index in the status block
526 */
527 rmb();
528
529 DP(NETIF_MSG_RX_STATUS,
530 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
531 fp->index, hw_comp_cons, sw_comp_cons);
532
533 while (sw_comp_cons != hw_comp_cons) {
534 struct sw_rx_bd *rx_buf = NULL;
535 struct sk_buff *skb;
536 union eth_rx_cqe *cqe;
537 u8 cqe_fp_flags;
538 u16 len, pad;
539
540 comp_ring_cons = RCQ_BD(sw_comp_cons);
541 bd_prod = RX_BD(bd_prod);
542 bd_cons = RX_BD(bd_cons);
543
544 /* Prefetch the page containing the BD descriptor
545 at producer's index. It will be needed when new skb is
546 allocated */
547 prefetch((void *)(PAGE_ALIGN((unsigned long)
548 (&fp->rx_desc_ring[bd_prod])) -
549 PAGE_SIZE + 1));
550
551 cqe = &fp->rx_comp_ring[comp_ring_cons];
552 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
553
554 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
555 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
556 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
557 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
558 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
559 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
560
561 /* is this a slowpath msg? */
562 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
563 bnx2x_sp_event(fp, cqe);
564 goto next_cqe;
565
566 /* this is an rx packet */
567 } else {
568 rx_buf = &fp->rx_buf_ring[bd_cons];
569 skb = rx_buf->skb;
570 prefetch(skb);
571 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
572 pad = cqe->fast_path_cqe.placement_offset;
573
			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START and/or
			 *   TPA_STOP flags set.
			 */
579 if ((!fp->disable_tpa) &&
580 (TPA_TYPE(cqe_fp_flags) !=
581 (TPA_TYPE_START | TPA_TYPE_END))) {
582 u16 queue = cqe->fast_path_cqe.queue_index;
583
584 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
585 DP(NETIF_MSG_RX_STATUS,
586 "calling tpa_start on queue %d\n",
587 queue);
588
589 bnx2x_tpa_start(fp, queue, skb,
590 bd_cons, bd_prod);
591
592 /* Set Toeplitz hash for an LRO skb */
593 bnx2x_set_skb_rxhash(bp, cqe, skb);
594
595 goto next_rx;
fe78d263 596 } else { /* TPA_STOP */
9f6c9258
DK
597 DP(NETIF_MSG_RX_STATUS,
598 "calling tpa_stop on queue %d\n",
599 queue);
600
				if (!BNX2X_RX_SUM_FIX(cqe))
					BNX2X_ERR("STOP on non-TCP "
						  "data\n");
604
605 /* This is a size of the linear data
606 on this skb */
607 len = le16_to_cpu(cqe->fast_path_cqe.
608 len_on_bd);
609 bnx2x_tpa_stop(bp, fp, queue, pad,
610 len, cqe, comp_ring_cons);
611#ifdef BNX2X_STOP_ON_ERROR
612 if (bp->panic)
613 return 0;
614#endif
615
616 bnx2x_update_sge_prod(fp,
617 &cqe->fast_path_cqe);
618 goto next_cqe;
619 }
620 }
621
622 dma_sync_single_for_device(&bp->pdev->dev,
623 dma_unmap_addr(rx_buf, mapping),
624 pad + RX_COPY_THRESH,
625 DMA_FROM_DEVICE);
217de5aa 626 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
9f6c9258
DK
627
628 /* is this an error packet? */
629 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
630 DP(NETIF_MSG_RX_ERR,
631 "ERROR flags %x rx packet %u\n",
632 cqe_fp_flags, sw_comp_cons);
633 fp->eth_q_stats.rx_err_discard_pkt++;
634 goto reuse_rx;
635 }
636
637 /* Since we don't have a jumbo ring
638 * copy small packets if mtu > 1500
639 */
640 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
641 (len <= RX_COPY_THRESH)) {
642 struct sk_buff *new_skb;
643
644 new_skb = netdev_alloc_skb(bp->dev,
645 len + pad);
646 if (new_skb == NULL) {
647 DP(NETIF_MSG_RX_ERR,
648 "ERROR packet dropped "
649 "because of alloc failure\n");
650 fp->eth_q_stats.rx_skb_alloc_failed++;
651 goto reuse_rx;
652 }
653
654 /* aligned copy */
655 skb_copy_from_linear_data_offset(skb, pad,
656 new_skb->data + pad, len);
657 skb_reserve(new_skb, pad);
658 skb_put(new_skb, len);
659
749a8503 660 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
9f6c9258
DK
661
662 skb = new_skb;
663
664 } else
665 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
666 dma_unmap_single(&bp->pdev->dev,
667 dma_unmap_addr(rx_buf, mapping),
a8c94b91 668 fp->rx_buf_size,
9f6c9258
DK
669 DMA_FROM_DEVICE);
670 skb_reserve(skb, pad);
671 skb_put(skb, len);
672
673 } else {
674 DP(NETIF_MSG_RX_ERR,
675 "ERROR packet dropped because "
676 "of alloc failure\n");
677 fp->eth_q_stats.rx_skb_alloc_failed++;
678reuse_rx:
749a8503 679 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
9f6c9258
DK
680 goto next_rx;
681 }
682
683 skb->protocol = eth_type_trans(skb, bp->dev);
684
			/* Set Toeplitz hash for a non-LRO skb */
686 bnx2x_set_skb_rxhash(bp, cqe, skb);
687
bc8acf2c 688 skb_checksum_none_assert(skb);
f85582f8 689
66371c44 690 if (bp->dev->features & NETIF_F_RXCSUM) {
9f6c9258
DK
691 if (likely(BNX2X_RX_CSUM_OK(cqe)))
692 skb->ip_summed = CHECKSUM_UNNECESSARY;
693 else
694 fp->eth_q_stats.hw_csum_err++;
695 }
696 }
697
698 skb_record_rx_queue(skb, fp->index);
699
		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);
705
706
707next_rx:
708 rx_buf->skb = NULL;
709
710 bd_cons = NEXT_RX_IDX(bd_cons);
711 bd_prod = NEXT_RX_IDX(bd_prod);
712 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
713 rx_pkt++;
714next_cqe:
715 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
716 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
717
718 if (rx_pkt == budget)
719 break;
720 } /* while */
721
722 fp->rx_bd_cons = bd_cons;
723 fp->rx_bd_prod = bd_prod_fw;
724 fp->rx_comp_cons = sw_comp_cons;
725 fp->rx_comp_prod = sw_comp_prod;
726
727 /* Update producers */
728 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
729 fp->rx_sge_prod);
730
731 fp->rx_pkt += rx_pkt;
732 fp->rx_calls++;
733
734 return rx_pkt;
735}
736
737static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
738{
739 struct bnx2x_fastpath *fp = fp_cookie;
740 struct bnx2x *bp = fp->bp;
741
523224a3
DK
742 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
743 "[fp %d fw_sd %d igusb %d]\n",
744 fp->index, fp->fw_sb_id, fp->igu_sb_id);
745 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
746
747#ifdef BNX2X_STOP_ON_ERROR
748 if (unlikely(bp->panic))
749 return IRQ_HANDLED;
750#endif
751
752 /* Handle Rx and Tx according to MSI-X vector */
753 prefetch(fp->rx_cons_sb);
754 prefetch(fp->tx_cons_sb);
523224a3 755 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
756 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
757
758 return IRQ_HANDLED;
759}
760
9f6c9258
DK
761/* HW Lock for shared dual port PHYs */
762void bnx2x_acquire_phy_lock(struct bnx2x *bp)
763{
764 mutex_lock(&bp->port.phy_mutex);
765
766 if (bp->port.need_hw_lock)
767 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
768}
769
770void bnx2x_release_phy_lock(struct bnx2x *bp)
771{
772 if (bp->port.need_hw_lock)
773 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
774
775 mutex_unlock(&bp->port.phy_mutex);
776}
777
/* calculates MF speed according to current line speed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
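
/* Illustrative example of the calculation above: on a 10000 Mbps link with
 * maxCfg == 50, SI mode treats maxCfg as a percentage and reports 5000 Mbps,
 * while SD mode treats it as a limit in 100 Mbps units and caps the reported
 * speed at 5000 Mbps as well.
 */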
801
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
818 data->line_speed = line_speed;
819
820 /* Link is down */
821 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
822 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
823 &data->link_report_flags);
824
825 /* Full DUPLEX */
826 if (bp->link_vars.duplex == DUPLEX_FULL)
827 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
828
829 /* Rx Flow Control is ON */
830 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
831 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
832
833 /* Tx Flow Control is ON */
834 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
835 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
836}
837
838/**
839 * bnx2x_link_report - report link status to OS.
840 *
841 * @bp: driver handle
842 *
843 * Calls the __bnx2x_link_report() under the same locking scheme
844 * as a link/PHY state managing code to ensure a consistent link
845 * reporting.
846 */
847
9f6c9258
DK
848void bnx2x_link_report(struct bnx2x *bp)
849{
2ae17f66
VZ
850 bnx2x_acquire_phy_lock(bp);
851 __bnx2x_link_report(bp);
852 bnx2x_release_phy_lock(bp);
853}
9f6c9258 854
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
863void __bnx2x_link_report(struct bnx2x *bp)
864{
865 struct bnx2x_link_report_data cur_data;
9f6c9258 866
2ae17f66
VZ
867 /* reread mf_cfg */
868 if (!CHIP_IS_E1(bp))
869 bnx2x_read_mf_cfg(bp);
870
871 /* Read the current link report info */
872 bnx2x_fill_report_data(bp, &cur_data);
873
874 /* Don't report link down or exactly the same link status twice */
875 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
876 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
877 &bp->last_reported_link.link_report_flags) &&
878 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
879 &cur_data.link_report_flags)))
880 return;
881
882 bp->link_cnt++;
9f6c9258 883
	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
887 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 888
2ae17f66
VZ
889 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
890 &cur_data.link_report_flags)) {
891 netif_carrier_off(bp->dev);
892 netdev_err(bp->dev, "NIC Link is Down\n");
893 return;
894 } else {
895 netif_carrier_on(bp->dev);
896 netdev_info(bp->dev, "NIC Link is Up, ");
897 pr_cont("%d Mbps ", cur_data.line_speed);
9f6c9258 898
2ae17f66
VZ
899 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
900 &cur_data.link_report_flags))
9f6c9258
DK
901 pr_cont("full duplex");
902 else
903 pr_cont("half duplex");
904
2ae17f66
VZ
905 /* Handle the FC at the end so that only these flags would be
906 * possibly set. This way we may easily check if there is no FC
907 * enabled.
908 */
909 if (cur_data.link_report_flags) {
910 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
911 &cur_data.link_report_flags)) {
9f6c9258 912 pr_cont(", receive ");
2ae17f66
VZ
913 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
914 &cur_data.link_report_flags))
9f6c9258
DK
915 pr_cont("& transmit ");
916 } else {
917 pr_cont(", transmit ");
918 }
919 pr_cont("flow control ON");
920 }
921 pr_cont("\n");
9f6c9258
DK
922 }
923}
924
925void bnx2x_init_rx_rings(struct bnx2x *bp)
926{
927 int func = BP_FUNC(bp);
928 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
929 ETH_MAX_AGGREGATION_QUEUES_E1H;
523224a3 930 u16 ring_prod;
9f6c9258 931 int i, j;
25141580 932
b3b83c3f 933 /* Allocate TPA resources */
ec6ba945 934 for_each_rx_queue(bp, j) {
523224a3 935 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 936
a8c94b91
VZ
937 DP(NETIF_MSG_IFUP,
938 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
939
523224a3 940 if (!fp->disable_tpa) {
b3b83c3f 941 /* Fill the per-aggregation pool */
9f6c9258
DK
942 for (i = 0; i < max_agg_queues; i++) {
943 fp->tpa_pool[i].skb =
a8c94b91 944 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
9f6c9258
DK
945 if (!fp->tpa_pool[i].skb) {
946 BNX2X_ERR("Failed to allocate TPA "
947 "skb pool for queue[%d] - "
948 "disabling TPA on this "
949 "queue!\n", j);
950 bnx2x_free_tpa_pool(bp, fp, i);
951 fp->disable_tpa = 1;
952 break;
953 }
954 dma_unmap_addr_set((struct sw_rx_bd *)
955 &bp->fp->tpa_pool[i],
956 mapping, 0);
957 fp->tpa_state[i] = BNX2X_TPA_STOP;
958 }
523224a3
DK
959
960 /* "next page" elements initialization */
961 bnx2x_set_next_page_sgl(fp);
962
963 /* set SGEs bit mask */
964 bnx2x_init_sge_ring_bit_mask(fp);
965
966 /* Allocate SGEs and initialize the ring elements */
967 for (i = 0, ring_prod = 0;
968 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
969
970 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
971 BNX2X_ERR("was only able to allocate "
972 "%d rx sges\n", i);
973 BNX2X_ERR("disabling TPA for"
974 " queue[%d]\n", j);
975 /* Cleanup already allocated elements */
976 bnx2x_free_rx_sge_range(bp,
977 fp, ring_prod);
978 bnx2x_free_tpa_pool(bp,
979 fp, max_agg_queues);
980 fp->disable_tpa = 1;
981 ring_prod = 0;
982 break;
983 }
984 ring_prod = NEXT_SGE_IDX(ring_prod);
985 }
986
987 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
988 }
989 }
990
ec6ba945 991 for_each_rx_queue(bp, j) {
9f6c9258
DK
992 struct bnx2x_fastpath *fp = &bp->fp[j];
993
994 fp->rx_bd_cons = 0;
9f6c9258 995
b3b83c3f
DK
996 /* Activate BD ring */
997 /* Warning!
998 * this will generate an interrupt (to the TSTORM)
999 * must only be done after chip is initialized
1000 */
1001 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1002 fp->rx_sge_prod);
9f6c9258 1003
9f6c9258
DK
1004 if (j != 0)
1005 continue;
1006
f2e0899f
DK
1007 if (!CHIP_IS_E2(bp)) {
1008 REG_WR(bp, BAR_USTRORM_INTMEM +
1009 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1010 U64_LO(fp->rx_comp_mapping));
1011 REG_WR(bp, BAR_USTRORM_INTMEM +
1012 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1013 U64_HI(fp->rx_comp_mapping));
1014 }
9f6c9258
DK
1015 }
1016}
f85582f8 1017
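/* Free every skb still pending on the TX rings (used on unload/error paths
 * via bnx2x_free_skbs()).
 */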
1018static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1019{
1020 int i;
1021
ec6ba945 1022 for_each_tx_queue(bp, i) {
9f6c9258
DK
1023 struct bnx2x_fastpath *fp = &bp->fp[i];
1024
1025 u16 bd_cons = fp->tx_bd_cons;
1026 u16 sw_prod = fp->tx_pkt_prod;
1027 u16 sw_cons = fp->tx_pkt_cons;
1028
1029 while (sw_cons != sw_prod) {
1030 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1031 sw_cons++;
1032 }
1033 }
1034}
1035
b3b83c3f
DK
1036static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1037{
1038 struct bnx2x *bp = fp->bp;
1039 int i;
1040
1041 /* ring wasn't allocated */
1042 if (fp->rx_buf_ring == NULL)
1043 return;
1044
1045 for (i = 0; i < NUM_RX_BD; i++) {
1046 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1047 struct sk_buff *skb = rx_buf->skb;
1048
1049 if (skb == NULL)
1050 continue;
1051
1052 dma_unmap_single(&bp->pdev->dev,
1053 dma_unmap_addr(rx_buf, mapping),
1054 fp->rx_buf_size, DMA_FROM_DEVICE);
1055
1056 rx_buf->skb = NULL;
1057 dev_kfree_skb(skb);
1058 }
1059}
1060
9f6c9258
DK
1061static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1062{
b3b83c3f 1063 int j;
9f6c9258 1064
ec6ba945 1065 for_each_rx_queue(bp, j) {
9f6c9258
DK
1066 struct bnx2x_fastpath *fp = &bp->fp[j];
1067
b3b83c3f 1068 bnx2x_free_rx_bds(fp);
9f6c9258 1069
9f6c9258
DK
1070 if (!fp->disable_tpa)
1071 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1072 ETH_MAX_AGGREGATION_QUEUES_E1 :
1073 ETH_MAX_AGGREGATION_QUEUES_E1H);
1074 }
1075}
1076
1077void bnx2x_free_skbs(struct bnx2x *bp)
1078{
1079 bnx2x_free_tx_skbs(bp);
1080 bnx2x_free_rx_skbs(bp);
1081}
1082
e3835b99
DK
1083void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1084{
1085 /* load old values */
1086 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1087
1088 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1089 /* leave all but MAX value */
1090 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1091
1092 /* set new MAX value */
1093 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1094 & FUNC_MF_CFG_MAX_BW_MASK;
1095
1096 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1097 }
1098}
1099
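/* Release the MSI-X vectors: entry 0 is the slowpath interrupt; the ethernet
 * queue vectors follow at an offset of one (shifted by one more when CNIC is
 * compiled in).
 */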
1100static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1101{
1102 int i, offset = 1;
1103
1104 free_irq(bp->msix_table[0].vector, bp->dev);
1105 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1106 bp->msix_table[0].vector);
1107
1108#ifdef BCM_CNIC
1109 offset++;
1110#endif
ec6ba945 1111 for_each_eth_queue(bp, i) {
9f6c9258
DK
1112 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
1113 "state %x\n", i, bp->msix_table[i + offset].vector,
1114 bnx2x_fp(bp, i, state));
1115
1116 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1117 }
1118}
1119
d6214d7a 1120void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1121{
d6214d7a
DK
1122 if (bp->flags & USING_MSIX_FLAG)
1123 bnx2x_free_msix_irqs(bp);
1124 else if (bp->flags & USING_MSI_FLAG)
1125 free_irq(bp->pdev->irq, bp->dev);
1126 else
9f6c9258
DK
1127 free_irq(bp->pdev->irq, bp->dev);
1128}
1129
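/* Request MSI-X vectors: one for the slowpath, one for CNIC when compiled in,
 * and one per ethernet queue. If the PCI core grants fewer vectors than
 * requested (but at least BNX2X_MIN_MSIX_VEC_CNT), the number of queues is
 * reduced to match; on other failures an error is returned so the driver can
 * fall back to MSI or legacy INTx.
 */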
int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1131{
d6214d7a 1132 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1133
d6214d7a
DK
1134 bp->msix_table[msix_vec].entry = msix_vec;
1135 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1136 bp->msix_table[0].entry);
1137 msix_vec++;
9f6c9258
DK
1138
1139#ifdef BCM_CNIC
d6214d7a
DK
1140 bp->msix_table[msix_vec].entry = msix_vec;
1141 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1142 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1143 msix_vec++;
9f6c9258 1144#endif
ec6ba945 1145 for_each_eth_queue(bp, i) {
d6214d7a 1146 bp->msix_table[msix_vec].entry = msix_vec;
9f6c9258 1147 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
d6214d7a
DK
1148 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1149 msix_vec++;
9f6c9258
DK
1150 }
1151
ec6ba945 1152 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
d6214d7a
DK
1153
1154 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1155
1156 /*
1157 * reconfigure number of tx/rx queues according to available
1158 * MSI-X vectors
1159 */
1160 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
1162 int diff = req_cnt - rc;
9f6c9258
DK
1163
1164 DP(NETIF_MSG_IFUP,
1165 "Trying to use less MSI-X vectors: %d\n", rc);
1166
1167 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1168
1169 if (rc) {
1170 DP(NETIF_MSG_IFUP,
1171 "MSI-X is not attainable rc %d\n", rc);
1172 return rc;
1173 }
d6214d7a
DK
1174 /*
1175 * decrease number of queues by number of unallocated entries
1176 */
1177 bp->num_queues -= diff;
9f6c9258
DK
1178
1179 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1180 bp->num_queues);
1181 } else if (rc) {
d6214d7a
DK
1182 /* fall to INTx if not enough memory */
1183 if (rc == -ENOMEM)
1184 bp->flags |= DISABLE_MSI_FLAG;
9f6c9258
DK
1185 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1186 return rc;
1187 }
1188
1189 bp->flags |= USING_MSIX_FLAG;
1190
1191 return 0;
1192}
1193
1194static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1195{
1196 int i, rc, offset = 1;
1197
1198 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1199 bp->dev->name, bp->dev);
1200 if (rc) {
1201 BNX2X_ERR("request sp irq failed\n");
1202 return -EBUSY;
1203 }
1204
1205#ifdef BCM_CNIC
1206 offset++;
1207#endif
ec6ba945 1208 for_each_eth_queue(bp, i) {
9f6c9258
DK
1209 struct bnx2x_fastpath *fp = &bp->fp[i];
1210 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1211 bp->dev->name, i);
1212
d6214d7a 1213 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1214 bnx2x_msix_fp_int, 0, fp->name, fp);
1215 if (rc) {
1216 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1217 bnx2x_free_msix_irqs(bp);
1218 return -EBUSY;
1219 }
1220
d6214d7a 1221 offset++;
9f6c9258
DK
1222 fp->state = BNX2X_FP_STATE_IRQ;
1223 }
1224
ec6ba945 1225 i = BNX2X_NUM_ETH_QUEUES(bp);
d6214d7a 1226 offset = 1 + CNIC_CONTEXT_USE;
9f6c9258
DK
1227 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1228 " ... fp[%d] %d\n",
1229 bp->msix_table[0].vector,
1230 0, bp->msix_table[offset].vector,
1231 i - 1, bp->msix_table[offset + i - 1].vector);
1232
1233 return 0;
1234}
1235
d6214d7a 1236int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1237{
1238 int rc;
1239
1240 rc = pci_enable_msi(bp->pdev);
1241 if (rc) {
1242 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1243 return -1;
1244 }
1245 bp->flags |= USING_MSI_FLAG;
1246
1247 return 0;
1248}
1249
1250static int bnx2x_req_irq(struct bnx2x *bp)
1251{
1252 unsigned long flags;
1253 int rc;
1254
1255 if (bp->flags & USING_MSI_FLAG)
1256 flags = 0;
1257 else
1258 flags = IRQF_SHARED;
1259
1260 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1261 bp->dev->name, bp->dev);
1262 if (!rc)
1263 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1264
1265 return rc;
1266}
1267
1268static void bnx2x_napi_enable(struct bnx2x *bp)
1269{
1270 int i;
1271
ec6ba945 1272 for_each_napi_queue(bp, i)
9f6c9258
DK
1273 napi_enable(&bnx2x_fp(bp, i, napi));
1274}
1275
1276static void bnx2x_napi_disable(struct bnx2x *bp)
1277{
1278 int i;
1279
ec6ba945 1280 for_each_napi_queue(bp, i)
9f6c9258
DK
1281 napi_disable(&bnx2x_fp(bp, i, napi));
1282}
1283
1284void bnx2x_netif_start(struct bnx2x *bp)
1285{
4b7ed897
DK
1286 if (netif_running(bp->dev)) {
1287 bnx2x_napi_enable(bp);
1288 bnx2x_int_enable(bp);
1289 if (bp->state == BNX2X_STATE_OPEN)
1290 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1291 }
1292}
1293
1294void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1295{
1296 bnx2x_int_disable_sync(bp, disable_hw);
1297 bnx2x_napi_disable(bp);
1298 netif_tx_disable(bp->dev);
1299}
9f6c9258 1300
8307fa3e
VZ
1301u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1302{
1303#ifdef BCM_CNIC
1304 struct bnx2x *bp = netdev_priv(dev);
1305 if (NO_FCOE(bp))
1306 return skb_tx_hash(dev, skb);
1307 else {
1308 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1309 u16 ether_type = ntohs(hdr->h_proto);
1310
1311 /* Skip VLAN tag if present */
1312 if (ether_type == ETH_P_8021Q) {
1313 struct vlan_ethhdr *vhdr =
1314 (struct vlan_ethhdr *)skb->data;
1315
1316 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1317 }
1318
1319 /* If ethertype is FCoE or FIP - use FCoE ring */
1320 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1321 return bnx2x_fcoe(bp, index);
1322 }
1323#endif
	/* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
	 */
1326 return __skb_tx_hash(dev, skb,
1327 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1328}
1329
d6214d7a
DK
1330void bnx2x_set_num_queues(struct bnx2x *bp)
1331{
1332 switch (bp->multi_mode) {
1333 case ETH_RSS_MODE_DISABLED:
9f6c9258 1334 bp->num_queues = 1;
d6214d7a
DK
1335 break;
1336 case ETH_RSS_MODE_REGULAR:
1337 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1338 break;
f85582f8 1339
9f6c9258 1340 default:
d6214d7a 1341 bp->num_queues = 1;
9f6c9258
DK
1342 break;
1343 }
ec6ba945
VZ
1344
1345 /* Add special queues */
1346 bp->num_queues += NONE_ETH_CONTEXT_USE;
1347}
1348
1349#ifdef BCM_CNIC
1350static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1351{
1352 if (!NO_FCOE(bp)) {
1353 if (!IS_MF_SD(bp))
1354 bnx2x_set_fip_eth_mac_addr(bp, 1);
1355 bnx2x_set_all_enode_macs(bp, 1);
1356 bp->flags |= FCOE_MACS_SET;
1357 }
9f6c9258 1358}
ec6ba945 1359#endif
9f6c9258 1360
6891dd25
DK
1361static void bnx2x_release_firmware(struct bnx2x *bp)
1362{
1363 kfree(bp->init_ops_offsets);
1364 kfree(bp->init_ops);
1365 kfree(bp->init_data);
1366 release_firmware(bp->firmware);
1367}
1368
ec6ba945
VZ
1369static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1370{
1371 int rc, num = bp->num_queues;
1372
1373#ifdef BCM_CNIC
1374 if (NO_FCOE(bp))
1375 num -= FCOE_CONTEXT_USE;
1376
1377#endif
1378 netif_set_real_num_tx_queues(bp->dev, num);
1379 rc = netif_set_real_num_rx_queues(bp->dev, num);
1380 return rc;
1381}
1382
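/* Size the RX buffers per queue: regular rings use the netdev MTU plus the
 * Ethernet/alignment/IP-padding overheads below, while the FCoE L2 ring uses
 * a fixed mini-jumbo MTU.
 */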
1383static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1384{
1385 int i;
1386
1387 for_each_queue(bp, i) {
1388 struct bnx2x_fastpath *fp = &bp->fp[i];
1389
1390 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1391 if (IS_FCOE_IDX(i))
1392 /*
1393 * Although there are no IP frames expected to arrive to
1394 * this ring we still want to add an
1395 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1396 * overrun attack.
1397 */
1398 fp->rx_buf_size =
1399 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1400 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1401 else
1402 fp->rx_buf_size =
1403 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1404 IP_HEADER_ALIGNMENT_PADDING;
1405 }
1406}
1407
9f6c9258
DK
1408/* must be called with rtnl_lock */
1409int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1410{
1411 u32 load_code;
1412 int i, rc;
1413
6891dd25
DK
1414 /* Set init arrays */
1415 rc = bnx2x_init_firmware(bp);
1416 if (rc) {
1417 BNX2X_ERR("Error loading firmware\n");
1418 return rc;
1419 }
1420
9f6c9258
DK
1421#ifdef BNX2X_STOP_ON_ERROR
1422 if (unlikely(bp->panic))
1423 return -EPERM;
1424#endif
1425
1426 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1427
2ae17f66
VZ
1428 /* Set the initial link reported state to link down */
1429 bnx2x_acquire_phy_lock(bp);
1430 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1431 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1432 &bp->last_reported_link.link_report_flags);
1433 bnx2x_release_phy_lock(bp);
1434
523224a3
DK
1435 /* must be called before memory allocation and HW init */
1436 bnx2x_ilt_set_info(bp);
1437
b3b83c3f
DK
1438 /* zero fastpath structures preserving invariants like napi which are
1439 * allocated only once
1440 */
1441 for_each_queue(bp, i)
1442 bnx2x_bz_fp(bp, i);
1443
a8c94b91
VZ
1444 /* Set the receive queues buffer size */
1445 bnx2x_set_rx_buf_size(bp);
1446
b3b83c3f
DK
1447 for_each_queue(bp, i)
1448 bnx2x_fp(bp, i, disable_tpa) =
1449 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1450
1451#ifdef BCM_CNIC
1452 /* We don't want TPA on FCoE L2 ring */
1453 bnx2x_fcoe(bp, disable_tpa) = 1;
1454#endif
1455
d6214d7a 1456 if (bnx2x_alloc_mem(bp))
9f6c9258 1457 return -ENOMEM;
d6214d7a 1458
b3b83c3f
DK
1459 /* As long as bnx2x_alloc_mem() may possibly update
1460 * bp->num_queues, bnx2x_set_real_num_queues() should always
1461 * come after it.
1462 */
ec6ba945 1463 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1464 if (rc) {
ec6ba945 1465 BNX2X_ERR("Unable to set real_num_queues\n");
d6214d7a 1466 goto load_error0;
9f6c9258
DK
1467 }
1468
9f6c9258
DK
1469 bnx2x_napi_enable(bp);
1470
9f6c9258
DK
	/* Send LOAD_REQUEST command to MCP.
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized,
	 * common blocks should be initialized, otherwise - not.
	 */
1476 if (!BP_NOMCP(bp)) {
a22f0788 1477 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
1478 if (!load_code) {
1479 BNX2X_ERR("MCP response failure, aborting\n");
1480 rc = -EBUSY;
d6214d7a 1481 goto load_error1;
9f6c9258
DK
1482 }
1483 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1484 rc = -EBUSY; /* other port in diagnostic mode */
d6214d7a 1485 goto load_error1;
9f6c9258
DK
1486 }
1487
1488 } else {
f2e0899f 1489 int path = BP_PATH(bp);
9f6c9258
DK
1490 int port = BP_PORT(bp);
1491
f2e0899f
DK
1492 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1493 path, load_count[path][0], load_count[path][1],
1494 load_count[path][2]);
1495 load_count[path][0]++;
1496 load_count[path][1 + port]++;
1497 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1498 path, load_count[path][0], load_count[path][1],
1499 load_count[path][2]);
1500 if (load_count[path][0] == 1)
9f6c9258 1501 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 1502 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
1503 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1504 else
1505 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1506 }
1507
1508 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 1509 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
9f6c9258
DK
1510 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1511 bp->port.pmf = 1;
1512 else
1513 bp->port.pmf = 0;
1514 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1515
1516 /* Initialize HW */
1517 rc = bnx2x_init_hw(bp, load_code);
1518 if (rc) {
1519 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 1520 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1521 goto load_error2;
1522 }
1523
d6214d7a
DK
1524 /* Connect to IRQs */
1525 rc = bnx2x_setup_irqs(bp);
523224a3
DK
1526 if (rc) {
1527 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1528 goto load_error2;
1529 }
1530
9f6c9258
DK
1531 /* Setup NIC internals and enable interrupts */
1532 bnx2x_nic_init(bp, load_code);
1533
f2e0899f
DK
1534 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1535 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
9f6c9258
DK
1536 (bp->common.shmem2_base))
1537 SHMEM2_WR(bp, dcc_support,
1538 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1539 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1540
1541 /* Send LOAD_DONE command to MCP */
1542 if (!BP_NOMCP(bp)) {
a22f0788 1543 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1544 if (!load_code) {
1545 BNX2X_ERR("MCP response failure, aborting\n");
1546 rc = -EBUSY;
1547 goto load_error3;
1548 }
1549 }
1550
e4901dde
VZ
1551 bnx2x_dcbx_init(bp);
1552
9f6c9258
DK
1553 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1554
523224a3
DK
1555 rc = bnx2x_func_start(bp);
1556 if (rc) {
1557 BNX2X_ERR("Function start failed!\n");
1558#ifndef BNX2X_STOP_ON_ERROR
1559 goto load_error3;
1560#else
1561 bp->panic = 1;
1562 return -EBUSY;
1563#endif
1564 }
1565
1566 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
9f6c9258
DK
1567 if (rc) {
1568 BNX2X_ERR("Setup leading failed!\n");
1569#ifndef BNX2X_STOP_ON_ERROR
1570 goto load_error3;
1571#else
1572 bp->panic = 1;
1573 return -EBUSY;
1574#endif
1575 }
1576
f2e0899f
DK
1577 if (!CHIP_IS_E1(bp) &&
1578 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1579 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1580 bp->flags |= MF_FUNC_DIS;
1581 }
9f6c9258 1582
9f6c9258 1583#ifdef BCM_CNIC
523224a3
DK
1584 /* Enable Timer scan */
1585 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
9f6c9258 1586#endif
f85582f8 1587
523224a3
DK
1588 for_each_nondefault_queue(bp, i) {
1589 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1590 if (rc)
9f6c9258 1591#ifdef BCM_CNIC
523224a3 1592 goto load_error4;
9f6c9258 1593#else
523224a3 1594 goto load_error3;
9f6c9258 1595#endif
523224a3
DK
1596 }
1597
1598 /* Now when Clients are configured we are ready to work */
1599 bp->state = BNX2X_STATE_OPEN;
1600
ec6ba945
VZ
1601#ifdef BCM_CNIC
1602 bnx2x_set_fcoe_eth_macs(bp);
1603#endif
1604
523224a3 1605 bnx2x_set_eth_mac(bp, 1);
9f6c9258 1606
6e30dd4e
VZ
1607 /* Clear MC configuration */
1608 if (CHIP_IS_E1(bp))
1609 bnx2x_invalidate_e1_mc_list(bp);
1610 else
1611 bnx2x_invalidate_e1h_mc_list(bp);
1612
1613 /* Clear UC lists configuration */
1614 bnx2x_invalidate_uc_list(bp);
1615
e3835b99
DK
1616 if (bp->pending_max) {
1617 bnx2x_update_max_mf_config(bp, bp->pending_max);
1618 bp->pending_max = 0;
1619 }
1620
9f6c9258
DK
1621 if (bp->port.pmf)
1622 bnx2x_initial_phy_init(bp, load_mode);
1623
6e30dd4e
VZ
1624 /* Initialize Rx filtering */
1625 bnx2x_set_rx_mode(bp->dev);
1626
9f6c9258
DK
1627 /* Start fast path */
1628 switch (load_mode) {
1629 case LOAD_NORMAL:
523224a3
DK
1630 /* Tx queue should be only reenabled */
1631 netif_tx_wake_all_queues(bp->dev);
9f6c9258 1632 /* Initialize the receive filter. */
9f6c9258
DK
1633 break;
1634
1635 case LOAD_OPEN:
1636 netif_tx_start_all_queues(bp->dev);
523224a3 1637 smp_mb__after_clear_bit();
9f6c9258
DK
1638 break;
1639
1640 case LOAD_DIAG:
9f6c9258
DK
1641 bp->state = BNX2X_STATE_DIAG;
1642 break;
1643
1644 default:
1645 break;
1646 }
1647
1648 if (!bp->port.pmf)
1649 bnx2x__link_status_update(bp);
1650
1651 /* start the timer */
1652 mod_timer(&bp->timer, jiffies + bp->current_interval);
1653
1654#ifdef BCM_CNIC
1655 bnx2x_setup_cnic_irq_info(bp);
1656 if (bp->state == BNX2X_STATE_OPEN)
1657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1658#endif
1659 bnx2x_inc_load_cnt(bp);
1660
6891dd25
DK
1661 bnx2x_release_firmware(bp);
1662
9f6c9258
DK
1663 return 0;
1664
1665#ifdef BCM_CNIC
1666load_error4:
1667 /* Disable Timer scan */
1668 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1669#endif
1670load_error3:
1671 bnx2x_int_disable_sync(bp, 1);
d6214d7a 1672
9f6c9258
DK
1673 /* Free SKBs, SGEs, TPA pool and driver internals */
1674 bnx2x_free_skbs(bp);
ec6ba945 1675 for_each_rx_queue(bp, i)
9f6c9258 1676 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 1677
9f6c9258 1678 /* Release IRQs */
d6214d7a
DK
1679 bnx2x_free_irq(bp);
1680load_error2:
1681 if (!BP_NOMCP(bp)) {
1682 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1683 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1684 }
1685
1686 bp->port.pmf = 0;
9f6c9258
DK
1687load_error1:
1688 bnx2x_napi_disable(bp);
d6214d7a 1689load_error0:
9f6c9258
DK
1690 bnx2x_free_mem(bp);
1691
6891dd25
DK
1692 bnx2x_release_firmware(bp);
1693
9f6c9258
DK
1694 return rc;
1695}
1696
1697/* must be called with rtnl_lock */
1698int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1699{
1700 int i;
1701
1702 if (bp->state == BNX2X_STATE_CLOSED) {
1703 /* Interface has been removed - nothing to recover */
1704 bp->recovery_state = BNX2X_RECOVERY_DONE;
1705 bp->is_leader = 0;
1706 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1707 smp_wmb();
1708
1709 return -EINVAL;
1710 }
1711
1712#ifdef BCM_CNIC
1713 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1714#endif
1715 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1716
1717 /* Set "drop all" */
1718 bp->rx_mode = BNX2X_RX_MODE_NONE;
1719 bnx2x_set_storm_rx_mode(bp);
1720
f2e0899f
DK
1721 /* Stop Tx */
1722 bnx2x_tx_disable(bp);
f85582f8 1723
9f6c9258 1724 del_timer_sync(&bp->timer);
f85582f8 1725
f2e0899f 1726 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
9f6c9258 1727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
9f6c9258 1728
f85582f8 1729 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9f6c9258
DK
1730
1731 /* Cleanup the chip if needed */
1732 if (unload_mode != UNLOAD_RECOVERY)
1733 bnx2x_chip_cleanup(bp, unload_mode);
523224a3
DK
1734 else {
1735 /* Disable HW interrupts, NAPI and Tx */
1736 bnx2x_netif_stop(bp, 1);
1737
1738 /* Release IRQs */
d6214d7a 1739 bnx2x_free_irq(bp);
523224a3 1740 }
9f6c9258
DK
1741
1742 bp->port.pmf = 0;
1743
1744 /* Free SKBs, SGEs, TPA pool and driver internals */
1745 bnx2x_free_skbs(bp);
ec6ba945 1746 for_each_rx_queue(bp, i)
9f6c9258 1747 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 1748
9f6c9258
DK
1749 bnx2x_free_mem(bp);
1750
1751 bp->state = BNX2X_STATE_CLOSED;
1752
1753 /* The last driver must disable a "close the gate" if there is no
1754 * parity attention or "process kill" pending.
1755 */
1756 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1757 bnx2x_reset_is_done(bp))
1758 bnx2x_disable_close_the_gate(bp);
1759
	/* Reset MCP mailbox sequence if there is ongoing recovery */
1761 if (unload_mode == UNLOAD_RECOVERY)
1762 bp->fw_seq = 0;
1763
1764 return 0;
1765}
f85582f8 1766
9f6c9258
DK
1767int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1768{
1769 u16 pmcsr;
1770
adf5f6a1
DK
1771 /* If there is no power capability, silently succeed */
1772 if (!bp->pm_cap) {
1773 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1774 return 0;
1775 }
1776
9f6c9258
DK
1777 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1778
1779 switch (state) {
1780 case PCI_D0:
1781 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1782 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1783 PCI_PM_CTRL_PME_STATUS));
1784
1785 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1786 /* delay required during transition out of D3hot */
1787 msleep(20);
1788 break;
1789
1790 case PCI_D3hot:
1791 /* If there are other clients above don't
1792 shut down the power */
1793 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1794 return 0;
1795 /* Don't shut down the power for emulation and FPGA */
1796 if (CHIP_REV_IS_SLOW(bp))
1797 return 0;
1798
1799 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1800 pmcsr |= 3;
1801
1802 if (bp->wol)
1803 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1804
1805 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1806 pmcsr);
1807
1808 /* No more memory access after this point until
1809 * device is brought back to D0.
1810 */
1811 break;
1812
1813 default:
1814 return -EINVAL;
1815 }
1816 return 0;
1817}
1818
9f6c9258
DK
1819/*
1820 * net_device service functions
1821 */
d6214d7a 1822int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
1823{
1824 int work_done = 0;
1825 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1826 napi);
1827 struct bnx2x *bp = fp->bp;
1828
1829 while (1) {
1830#ifdef BNX2X_STOP_ON_ERROR
1831 if (unlikely(bp->panic)) {
1832 napi_complete(napi);
1833 return 0;
1834 }
1835#endif
1836
1837 if (bnx2x_has_tx_work(fp))
1838 bnx2x_tx_int(fp);
1839
1840 if (bnx2x_has_rx_work(fp)) {
1841 work_done += bnx2x_rx_int(fp, budget - work_done);
1842
1843 /* must not complete if we consumed full budget */
1844 if (work_done >= budget)
1845 break;
1846 }
1847
1848 /* Fall out from the NAPI loop if needed */
1849 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
1850#ifdef BCM_CNIC
1851 /* No need to update SB for FCoE L2 ring as long as
1852 * it's connected to the default SB and the SB
1853 * has been updated when NAPI was scheduled.
1854 */
1855 if (IS_FCOE_FP(fp)) {
1856 napi_complete(napi);
1857 break;
1858 }
1859#endif
1860
9f6c9258 1861 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
1862 /* bnx2x_has_rx_work() reads the status block,
1863 * thus we need to ensure that status block indices
1864 * have been actually read (bnx2x_update_fpsb_idx)
1865 * prior to this check (bnx2x_has_rx_work) so that
1866 * we won't write the "newer" value of the status block
1867 * to IGU (if there was a DMA right after
1868 * bnx2x_has_rx_work and if there is no rmb, the memory
1869 * reading (bnx2x_update_fpsb_idx) may be postponed
1870 * to right before bnx2x_ack_sb). In this case there
1871 * will never be another interrupt until there is
1872 * another update of the status block, while there
1873 * is still unhandled work.
1874 */
9f6c9258
DK
1875 rmb();
1876
1877 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1878 napi_complete(napi);
1879 /* Re-enable interrupts */
523224a3
DK
1880 DP(NETIF_MSG_HW,
1881 "Update index to %d\n", fp->fp_hc_idx);
1882 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1883 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
1884 IGU_INT_ENABLE, 1);
1885 break;
1886 }
1887 }
1888 }
1889
1890 return work_done;
1891}
1892
9f6c9258
DK
1893/* we split the first BD into headers and data BDs
1894 * to ease the pain of our fellow microcode engineers
1895 * we use one mapping for both BDs
1896 * So far this has only been observed to happen
1897 * in Other Operating Systems(TM)
1898 */
1899static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1900 struct bnx2x_fastpath *fp,
1901 struct sw_tx_bd *tx_buf,
1902 struct eth_tx_start_bd **tx_bd, u16 hlen,
1903 u16 bd_prod, int nbd)
1904{
1905 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1906 struct eth_tx_bd *d_tx_bd;
1907 dma_addr_t mapping;
1908 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1909
1910 /* first fix first BD */
1911 h_tx_bd->nbd = cpu_to_le16(nbd);
1912 h_tx_bd->nbytes = cpu_to_le16(hlen);
1913
1914 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1915 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1916 h_tx_bd->addr_lo, h_tx_bd->nbd);
1917
1918 /* now get a new data BD
1919 * (after the pbd) and fill it */
1920 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1921 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1922
1923 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1924 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1925
1926 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1927 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1928 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1929
1930 /* this marks the BD as one that has no individual mapping */
1931 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1932
1933 DP(NETIF_MSG_TX_QUEUED,
1934 "TSO split data size is %d (%x:%x)\n",
1935 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1936
1937 /* update tx_bd */
1938 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1939
1940 return bd_prod;
1941}
1942
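/*
 * Illustrative sketch, not part of the driver: the arithmetic behind
 * bnx2x_tx_split() above.  A single DMA mapping of old_len bytes is
 * described by two BDs: the first keeps only the hlen header bytes, the
 * second starts hlen bytes into the same mapping and carries the rest.
 * The struct and function names here are hypothetical.
 */
struct sketch_bd {
	unsigned long long addr;	/* DMA address of an already-mapped buffer */
	unsigned int nbytes;
};

static void sketch_split_bd(struct sketch_bd *hdr_bd,
			    struct sketch_bd *data_bd, unsigned int hlen)
{
	unsigned int old_len = hdr_bd->nbytes;

	hdr_bd->nbytes = hlen;			/* first BD: headers only */
	data_bd->addr = hdr_bd->addr + hlen;	/* same mapping, offset by hlen */
	data_bd->nbytes = old_len - hlen;	/* second BD: remaining payload */
}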
1943static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1944{
1945 if (fix > 0)
1946 csum = (u16) ~csum_fold(csum_sub(csum,
1947 csum_partial(t_header - fix, fix, 0)));
1948
1949 else if (fix < 0)
1950 csum = (u16) ~csum_fold(csum_add(csum,
1951 csum_partial(t_header, -fix, 0)));
1952
1953 return swab16(csum);
1954}
1955
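/*
 * Illustrative sketch, not part of the driver: how a 16-bit
 * ones-complement checksum can be corrected when it was computed over a
 * few extra bytes at the front, which is what bnx2x_csum_fix() does with
 * the kernel's csum_sub()/csum_partial() helpers.  Plain C, hypothetical
 * names; 'fix' is assumed to be even so 16-bit word boundaries line up.
 */
static unsigned int ocsum16(const unsigned char *data, int len)
{
	unsigned int sum = 0;
	int i;

	/* RFC 1071 style: sum big-endian 16-bit words, pad an odd tail */
	for (i = 0; i + 1 < len; i += 2)
		sum += (data[i] << 8) | data[i + 1];
	if (len & 1)
		sum += data[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	return sum;
}

/* Remove the contribution of the first 'fix' bytes from 'sum'. */
static unsigned int ocsum16_trim_front(unsigned int sum,
				       const unsigned char *head, int fix)
{
	/* ones-complement subtraction: add the 16-bit complement, refold */
	sum += 0xffff - ocsum16(head, fix);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}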
1956static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1957{
1958 u32 rc;
1959
1960 if (skb->ip_summed != CHECKSUM_PARTIAL)
1961 rc = XMIT_PLAIN;
1962
1963 else {
d0d9d8ef 1964 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
1965 rc = XMIT_CSUM_V6;
1966 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1967 rc |= XMIT_CSUM_TCP;
1968
1969 } else {
1970 rc = XMIT_CSUM_V4;
1971 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1972 rc |= XMIT_CSUM_TCP;
1973 }
1974 }
1975
5892b9e9
VZ
1976 if (skb_is_gso_v6(skb))
1977 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1978 else if (skb_is_gso(skb))
1979 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
1980
1981 return rc;
1982}
1983
1984#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1985/* check if packet requires linearization (packet is too fragmented)
1986 no need to check fragmentation if page size > 8K (there will be no
 1987 violation of FW restrictions) */
1988static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1989 u32 xmit_type)
1990{
1991 int to_copy = 0;
1992 int hlen = 0;
1993 int first_bd_sz = 0;
1994
1995 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1996 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1997
1998 if (xmit_type & XMIT_GSO) {
1999 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2000 /* Check if LSO packet needs to be copied:
2001 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2002 int wnd_size = MAX_FETCH_BD - 3;
2003 /* Number of windows to check */
2004 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2005 int wnd_idx = 0;
2006 int frag_idx = 0;
2007 u32 wnd_sum = 0;
2008
2009 /* Headers length */
2010 hlen = (int)(skb_transport_header(skb) - skb->data) +
2011 tcp_hdrlen(skb);
2012
2013 /* Amount of data (w/o headers) on linear part of SKB*/
2014 first_bd_sz = skb_headlen(skb) - hlen;
2015
2016 wnd_sum = first_bd_sz;
2017
2018 /* Calculate the first sum - it's special */
2019 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2020 wnd_sum +=
2021 skb_shinfo(skb)->frags[frag_idx].size;
2022
2023 /* If there was data on linear skb data - check it */
2024 if (first_bd_sz > 0) {
2025 if (unlikely(wnd_sum < lso_mss)) {
2026 to_copy = 1;
2027 goto exit_lbl;
2028 }
2029
2030 wnd_sum -= first_bd_sz;
2031 }
2032
2033 /* Others are easier: run through the frag list and
2034 check all windows */
2035 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2036 wnd_sum +=
2037 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2038
2039 if (unlikely(wnd_sum < lso_mss)) {
2040 to_copy = 1;
2041 break;
2042 }
2043 wnd_sum -=
2044 skb_shinfo(skb)->frags[wnd_idx].size;
2045 }
2046 } else {
 2047 /* a non-LSO packet that is too fragmented must
 2048 always be linearized */
2049 to_copy = 1;
2050 }
2051 }
2052
2053exit_lbl:
2054 if (unlikely(to_copy))
2055 DP(NETIF_MSG_TX_QUEUED,
2056 "Linearization IS REQUIRED for %s packet. "
2057 "num_frags %d hlen %d first_bd_sz %d\n",
2058 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2059 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2060
2061 return to_copy;
2062}
2063#endif
2064
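/*
 * Illustrative sketch, not part of the driver: a simplified version of
 * the sliding-window test done by bnx2x_pkt_req_lin() above.  buf_len[]
 * holds the payload of each buffer (linear part first, then the frags);
 * if any window of wnd_size consecutive buffers carries less than one
 * MSS, the packet would violate the FW limit and must be linearized.
 * Names are hypothetical.
 */
static int needs_linearization_sketch(const unsigned int *buf_len, int nbufs,
				      unsigned int mss, int wnd_size)
{
	unsigned int wnd_sum = 0;
	int i;

	if (nbufs <= wnd_size)
		return 0;	/* short chain: the FW limit cannot be hit */

	for (i = 0; i < wnd_size; i++)	/* sum of the first window */
		wnd_sum += buf_len[i];
	if (wnd_sum < mss)
		return 1;

	/* slide: drop the oldest buffer, add the next one */
	for (i = wnd_size; i < nbufs; i++) {
		wnd_sum = wnd_sum - buf_len[i - wnd_size] + buf_len[i];
		if (wnd_sum < mss)
			return 1;
	}
	return 0;
}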
2297a2da
VZ
2065static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2066 u32 xmit_type)
f2e0899f 2067{
2297a2da
VZ
2068 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2069 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2070 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2071 if ((xmit_type & XMIT_GSO_V6) &&
2072 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2073 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2074}
2075
2076/**
e8920674 2077 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2078 *
e8920674
DK
2079 * @skb: packet skb
2080 * @pbd: parse BD
2081 * @xmit_type: xmit flags
f2e0899f
DK
2082 */
2083static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2084 struct eth_tx_parse_bd_e1x *pbd,
2085 u32 xmit_type)
2086{
2087 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2088 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2089 pbd->tcp_flags = pbd_tcp_flags(skb);
2090
2091 if (xmit_type & XMIT_GSO_V4) {
2092 pbd->ip_id = swab16(ip_hdr(skb)->id);
2093 pbd->tcp_pseudo_csum =
2094 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2095 ip_hdr(skb)->daddr,
2096 0, IPPROTO_TCP, 0));
2097
2098 } else
2099 pbd->tcp_pseudo_csum =
2100 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2101 &ipv6_hdr(skb)->daddr,
2102 0, IPPROTO_TCP, 0));
2103
2104 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2105}
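
/*
 * Illustrative sketch, not part of the driver: the IPv4 pseudo-header
 * checksum seed that bnx2x_set_pbd_gso() obtains above via
 * ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0).  With the length
 * set to 0, the sum covers only the addresses and the protocol; the
 * hardware then folds in the real length of every segment it generates.
 * Plain C with host-byte-order inputs and hypothetical names - the real
 * code works on big-endian fields and byte-swaps the result.
 */
static unsigned short pseudo_hdr_seed_v4(unsigned int saddr,
					 unsigned int daddr,
					 unsigned char proto)
{
	unsigned int sum;

	/* sum the pseudo header as 16-bit words, with tcp length = 0 */
	sum  = (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;			/* the zero byte + protocol word */

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */

	return (unsigned short)sum;	/* seed; not yet complemented */
}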
f85582f8 2106
f2e0899f 2107/**
e8920674 2108 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2109 *
e8920674
DK
2110 * @bp: driver handle
2111 * @skb: packet skb
2112 * @parsing_data: data to be updated
2113 * @xmit_type: xmit flags
f2e0899f 2114 *
e8920674 2115 * 57712 related
f2e0899f
DK
2116 */
2117static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2118 u32 *parsing_data, u32 xmit_type)
f2e0899f 2119{
e39aece7
VZ
2120 *parsing_data |=
2121 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2122 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2123 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2124
e39aece7
VZ
2125 if (xmit_type & XMIT_CSUM_TCP) {
2126 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2127 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2128 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2129
e39aece7
VZ
2130 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2131 } else
2132 /* We support checksum offload for TCP and UDP only.
2133 * No need to pass the UDP header length - it's a constant.
2134 */
2135 return skb_transport_header(skb) +
2136 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2137}
2138
2139/**
e8920674 2140 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2141 *
e8920674
DK
2142 * @bp: driver handle
2143 * @skb: packet skb
2144 * @pbd: parse BD to be updated
2145 * @xmit_type: xmit flags
f2e0899f
DK
2146 */
2147static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2148 struct eth_tx_parse_bd_e1x *pbd,
2149 u32 xmit_type)
2150{
e39aece7 2151 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2152
2153 /* for now NS flag is not used in Linux */
2154 pbd->global_data =
2155 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2156 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2157
2158 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2159 skb_network_header(skb)) >> 1;
f2e0899f 2160
e39aece7
VZ
2161 hlen += pbd->ip_hlen_w;
2162
2163 /* We support checksum offload for TCP and UDP only */
2164 if (xmit_type & XMIT_CSUM_TCP)
2165 hlen += tcp_hdrlen(skb) / 2;
2166 else
2167 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2168
2169 pbd->total_hlen_w = cpu_to_le16(hlen);
2170 hlen = hlen*2;
2171
2172 if (xmit_type & XMIT_CSUM_TCP) {
2173 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2174
2175 } else {
2176 s8 fix = SKB_CS_OFF(skb); /* signed! */
2177
2178 DP(NETIF_MSG_TX_QUEUED,
2179 "hlen %d fix %d csum before fix %x\n",
2180 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2181
2182 /* HW bug: fixup the CSUM */
2183 pbd->tcp_pseudo_csum =
2184 bnx2x_csum_fix(skb_transport_header(skb),
2185 SKB_CS(skb), fix);
2186
2187 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2188 pbd->tcp_pseudo_csum);
2189 }
2190
2191 return hlen;
2192}
f85582f8 2193
9f6c9258
DK
2194/* called with netif_tx_lock
2195 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2196 * netif_wake_queue()
2197 */
2198netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2199{
2200 struct bnx2x *bp = netdev_priv(dev);
2201 struct bnx2x_fastpath *fp;
2202 struct netdev_queue *txq;
2203 struct sw_tx_bd *tx_buf;
2204 struct eth_tx_start_bd *tx_start_bd;
2205 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2206 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2207 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2208 u32 pbd_e2_parsing_data = 0;
9f6c9258
DK
2209 u16 pkt_prod, bd_prod;
2210 int nbd, fp_index;
2211 dma_addr_t mapping;
2212 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2213 int i;
2214 u8 hlen = 0;
2215 __le16 pkt_size = 0;
2216 struct ethhdr *eth;
2217 u8 mac_type = UNICAST_ADDRESS;
2218
2219#ifdef BNX2X_STOP_ON_ERROR
2220 if (unlikely(bp->panic))
2221 return NETDEV_TX_BUSY;
2222#endif
2223
2224 fp_index = skb_get_queue_mapping(skb);
2225 txq = netdev_get_tx_queue(dev, fp_index);
2226
2227 fp = &bp->fp[fp_index];
2228
2229 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2230 fp->eth_q_stats.driver_xoff++;
2231 netif_tx_stop_queue(txq);
2232 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2233 return NETDEV_TX_BUSY;
2234 }
2235
f2e0899f
DK
2236 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2237 "protocol(%x,%x) gso type %x xmit_type %x\n",
2238 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2239 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2240
2241 eth = (struct ethhdr *)skb->data;
2242
2243 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2244 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2245 if (is_broadcast_ether_addr(eth->h_dest))
2246 mac_type = BROADCAST_ADDRESS;
2247 else
2248 mac_type = MULTICAST_ADDRESS;
2249 }
2250
2251#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2252 /* First, check if we need to linearize the skb (due to FW
2253 restrictions). No need to check fragmentation if page size > 8K
 2254 (there will be no violation of FW restrictions) */
2255 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2256 /* Statistics of linearization */
2257 bp->lin_cnt++;
2258 if (skb_linearize(skb) != 0) {
2259 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2260 "silently dropping this SKB\n");
2261 dev_kfree_skb_any(skb);
2262 return NETDEV_TX_OK;
2263 }
2264 }
2265#endif
2266
2267 /*
2268 Please read carefully. First we use one BD which we mark as start,
2269 then we have a parsing info BD (used for TSO or xsum),
2270 and only then we have the rest of the TSO BDs.
2271 (don't forget to mark the last one as last,
2272 and to unmap only AFTER you write to the BD ...)
 2273 And above all, all PBD sizes are in words - NOT DWORDS!
2274 */
2275
2276 pkt_prod = fp->tx_pkt_prod++;
2277 bd_prod = TX_BD(fp->tx_bd_prod);
2278
2279 /* get a tx_buf and first BD */
2280 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2281 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2282
2283 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2284 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2285 mac_type);
2286
9f6c9258 2287 /* header nbd */
f85582f8 2288 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2289
2290 /* remember the first BD of the packet */
2291 tx_buf->first_bd = fp->tx_bd_prod;
2292 tx_buf->skb = skb;
2293 tx_buf->flags = 0;
2294
2295 DP(NETIF_MSG_TX_QUEUED,
2296 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2297 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2298
eab6d18d 2299 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2300 tx_start_bd->vlan_or_ethertype =
2301 cpu_to_le16(vlan_tx_tag_get(skb));
2302 tx_start_bd->bd_flags.as_bitfield |=
2303 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2304 } else
523224a3 2305 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2306
2307 /* turn on parsing and get a BD */
2308 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2309
523224a3
DK
2310 if (xmit_type & XMIT_CSUM) {
2311 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2312
2313 if (xmit_type & XMIT_CSUM_V4)
2314 tx_start_bd->bd_flags.as_bitfield |=
2315 ETH_TX_BD_FLAGS_IP_CSUM;
2316 else
2317 tx_start_bd->bd_flags.as_bitfield |=
2318 ETH_TX_BD_FLAGS_IPV6;
9f6c9258 2319
523224a3
DK
2320 if (!(xmit_type & XMIT_CSUM_TCP))
2321 tx_start_bd->bd_flags.as_bitfield |=
2322 ETH_TX_BD_FLAGS_IS_UDP;
2323 }
9f6c9258 2324
f2e0899f
DK
2325 if (CHIP_IS_E2(bp)) {
2326 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2327 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2328 /* Set PBD in checksum offload case */
2329 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2330 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2331 &pbd_e2_parsing_data,
2332 xmit_type);
f2e0899f
DK
2333 } else {
2334 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2335 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2336 /* Set PBD in checksum offload case */
2337 if (xmit_type & XMIT_CSUM)
2338 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2339
9f6c9258
DK
2340 }
2341
f85582f8 2342 /* Map skb linear data for DMA */
9f6c9258
DK
2343 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2344 skb_headlen(skb), DMA_TO_DEVICE);
2345
f85582f8 2346 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2347 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2348 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2349 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2350 tx_start_bd->nbd = cpu_to_le16(nbd);
2351 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2352 pkt_size = tx_start_bd->nbytes;
2353
2354 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2355 " nbytes %d flags %x vlan %x\n",
2356 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2357 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2358 tx_start_bd->bd_flags.as_bitfield,
2359 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2360
2361 if (xmit_type & XMIT_GSO) {
2362
2363 DP(NETIF_MSG_TX_QUEUED,
2364 "TSO packet len %d hlen %d total len %d tso size %d\n",
2365 skb->len, hlen, skb_headlen(skb),
2366 skb_shinfo(skb)->gso_size);
2367
2368 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2369
2370 if (unlikely(skb_headlen(skb) > hlen))
2371 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2372 hlen, bd_prod, ++nbd);
f2e0899f 2373 if (CHIP_IS_E2(bp))
2297a2da
VZ
2374 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2375 xmit_type);
f2e0899f
DK
2376 else
2377 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2378 }
2297a2da
VZ
2379
2380 /* Set the PBD's parsing_data field if not zero
2381 * (for the chips newer than 57711).
2382 */
2383 if (pbd_e2_parsing_data)
2384 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2385
9f6c9258
DK
2386 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2387
f85582f8 2388 /* Handle fragmented skb */
9f6c9258
DK
2389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2390 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2391
2392 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2393 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2394 if (total_pkt_bd == NULL)
2395 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2396
2397 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2398 frag->page_offset,
2399 frag->size, DMA_TO_DEVICE);
2400
2401 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2402 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2403 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2404 le16_add_cpu(&pkt_size, frag->size);
2405
2406 DP(NETIF_MSG_TX_QUEUED,
2407 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2408 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2409 le16_to_cpu(tx_data_bd->nbytes));
2410 }
2411
2412 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2413
2414 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2415
2416 /* now send a tx doorbell, counting the next BD
2417 * if the packet contains or ends with it
2418 */
2419 if (TX_BD_POFF(bd_prod) < nbd)
2420 nbd++;
2421
2422 if (total_pkt_bd != NULL)
2423 total_pkt_bd->total_pkt_bytes = pkt_size;
2424
523224a3 2425 if (pbd_e1x)
9f6c9258 2426 DP(NETIF_MSG_TX_QUEUED,
523224a3 2427 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2428 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2429 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2430 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2431 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2432 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2433 if (pbd_e2)
2434 DP(NETIF_MSG_TX_QUEUED,
2435 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2436 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2437 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2438 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2439 pbd_e2->parsing_data);
9f6c9258
DK
2440 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2441
2442 /*
2443 * Make sure that the BD data is updated before updating the producer
2444 * since FW might read the BD right after the producer is updated.
2445 * This is only applicable for weak-ordered memory model archs such
 2446 * as IA-64. The following barrier is also mandatory since the FW
 2447 * assumes that packets must have BDs.
2448 */
2449 wmb();
2450
2451 fp->tx_db.data.prod += nbd;
2452 barrier();
f85582f8 2453
523224a3 2454 DOORBELL(bp, fp->cid, fp->tx_db.raw);
9f6c9258
DK
2455
2456 mmiowb();
2457
2458 fp->tx_bd_prod += nbd;
2459
2460 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2461 netif_tx_stop_queue(txq);
2462
2463 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2464 * ordering of set_bit() in netif_tx_stop_queue() and read of
 2465 * fp->tx_bd_cons */
2466 smp_mb();
2467
2468 fp->eth_q_stats.driver_xoff++;
2469 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2470 netif_tx_wake_queue(txq);
2471 }
2472 fp->tx_pkt++;
2473
2474 return NETDEV_TX_OK;
2475}
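
/*
 * Illustrative sketch, not part of the driver: the stop/re-check/wake
 * sequence at the end of bnx2x_start_xmit() above, on a trivial ring so
 * the race it closes is visible.  In the real code the smp_mb() pairs
 * with a barrier in bnx2x_tx_int(); here the queue state is just a flag
 * and all names are hypothetical.
 */
struct sketch_txq {
	unsigned int prod;	/* descriptors posted by xmit */
	unsigned int cons;	/* descriptors reclaimed on completion */
	unsigned int size;	/* total descriptors in the ring */
	int stopped;		/* analogue of the netif_tx queue state */
};

static unsigned int sketch_tx_avail(const struct sketch_txq *q)
{
	return q->size - (q->prod - q->cons);
}

static void sketch_post_packet(struct sketch_txq *q, unsigned int nbd,
			       unsigned int worst_case_bds)
{
	q->prod += nbd;

	if (sketch_tx_avail(q) < worst_case_bds) {
		q->stopped = 1;		/* netif_tx_stop_queue() */
		/*
		 * A completion may have freed descriptors between the check
		 * and the stop; re-check (after the memory barrier in the
		 * real driver) and wake immediately so the queue cannot
		 * stall with room actually available.
		 */
		if (sketch_tx_avail(q) >= worst_case_bds)
			q->stopped = 0;	/* netif_tx_wake_queue() */
	}
}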
f85582f8 2476
9f6c9258
DK
2477/* called with rtnl_lock */
2478int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2479{
2480 struct sockaddr *addr = p;
2481 struct bnx2x *bp = netdev_priv(dev);
2482
2483 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2484 return -EINVAL;
2485
2486 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
523224a3
DK
2487 if (netif_running(dev))
2488 bnx2x_set_eth_mac(bp, 1);
9f6c9258
DK
2489
2490 return 0;
2491}
2492
b3b83c3f
DK
2493static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2494{
2495 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2496 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2497
2498 /* Common */
2499#ifdef BCM_CNIC
2500 if (IS_FCOE_IDX(fp_index)) {
2501 memset(sb, 0, sizeof(union host_hc_status_block));
2502 fp->status_blk_mapping = 0;
2503
2504 } else {
2505#endif
2506 /* status blocks */
2507 if (CHIP_IS_E2(bp))
2508 BNX2X_PCI_FREE(sb->e2_sb,
2509 bnx2x_fp(bp, fp_index,
2510 status_blk_mapping),
2511 sizeof(struct host_hc_status_block_e2));
2512 else
2513 BNX2X_PCI_FREE(sb->e1x_sb,
2514 bnx2x_fp(bp, fp_index,
2515 status_blk_mapping),
2516 sizeof(struct host_hc_status_block_e1x));
2517#ifdef BCM_CNIC
2518 }
2519#endif
2520 /* Rx */
2521 if (!skip_rx_queue(bp, fp_index)) {
2522 bnx2x_free_rx_bds(fp);
2523
2524 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2525 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2526 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2527 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2528 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2529
2530 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2531 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2532 sizeof(struct eth_fast_path_rx_cqe) *
2533 NUM_RCQ_BD);
2534
2535 /* SGE ring */
2536 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2537 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2538 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2539 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2540 }
2541
2542 /* Tx */
2543 if (!skip_tx_queue(bp, fp_index)) {
2544 /* fastpath tx rings: tx_buf tx_desc */
2545 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2546 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2547 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2548 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2549 }
2550 /* end of fastpath */
2551}
2552
2553void bnx2x_free_fp_mem(struct bnx2x *bp)
2554{
2555 int i;
2556 for_each_queue(bp, i)
2557 bnx2x_free_fp_mem_at(bp, i);
2558}
2559
2560static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2561{
2562 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2563 if (CHIP_IS_E2(bp)) {
2564 bnx2x_fp(bp, index, sb_index_values) =
2565 (__le16 *)status_blk.e2_sb->sb.index_values;
2566 bnx2x_fp(bp, index, sb_running_index) =
2567 (__le16 *)status_blk.e2_sb->sb.running_index;
2568 } else {
2569 bnx2x_fp(bp, index, sb_index_values) =
2570 (__le16 *)status_blk.e1x_sb->sb.index_values;
2571 bnx2x_fp(bp, index, sb_running_index) =
2572 (__le16 *)status_blk.e1x_sb->sb.running_index;
2573 }
2574}
2575
2576static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2577{
2578 union host_hc_status_block *sb;
2579 struct bnx2x_fastpath *fp = &bp->fp[index];
2580 int ring_size = 0;
2581
2582 /* if rx_ring_size specified - use it */
2583 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2584 MAX_RX_AVAIL/bp->num_queues;
2585
2586 /* allocate at least number of buffers required by FW */
2587 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2588 MIN_RX_SIZE_TPA,
2589 rx_ring_size);
2590
2591 bnx2x_fp(bp, index, bp) = bp;
2592 bnx2x_fp(bp, index, index) = index;
2593
2594 /* Common */
2595 sb = &bnx2x_fp(bp, index, status_blk);
2596#ifdef BCM_CNIC
2597 if (!IS_FCOE_IDX(index)) {
2598#endif
2599 /* status blocks */
2600 if (CHIP_IS_E2(bp))
2601 BNX2X_PCI_ALLOC(sb->e2_sb,
2602 &bnx2x_fp(bp, index, status_blk_mapping),
2603 sizeof(struct host_hc_status_block_e2));
2604 else
2605 BNX2X_PCI_ALLOC(sb->e1x_sb,
2606 &bnx2x_fp(bp, index, status_blk_mapping),
2607 sizeof(struct host_hc_status_block_e1x));
2608#ifdef BCM_CNIC
2609 }
2610#endif
8eef2af1
DK
2611
2612 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2613 * set shortcuts for it.
2614 */
2615 if (!IS_FCOE_IDX(index))
2616 set_sb_shortcuts(bp, index);
b3b83c3f
DK
2617
2618 /* Tx */
2619 if (!skip_tx_queue(bp, index)) {
2620 /* fastpath tx rings: tx_buf tx_desc */
2621 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2622 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2623 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2624 &bnx2x_fp(bp, index, tx_desc_mapping),
2625 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2626 }
2627
2628 /* Rx */
2629 if (!skip_rx_queue(bp, index)) {
2630 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2631 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2632 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2633 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2634 &bnx2x_fp(bp, index, rx_desc_mapping),
2635 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2636
2637 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2638 &bnx2x_fp(bp, index, rx_comp_mapping),
2639 sizeof(struct eth_fast_path_rx_cqe) *
2640 NUM_RCQ_BD);
2641
2642 /* SGE ring */
2643 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2644 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2645 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2646 &bnx2x_fp(bp, index, rx_sge_mapping),
2647 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2648 /* RX BD ring */
2649 bnx2x_set_next_page_rx_bd(fp);
2650
2651 /* CQ ring */
2652 bnx2x_set_next_page_rx_cq(fp);
2653
2654 /* BDs */
2655 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2656 if (ring_size < rx_ring_size)
2657 goto alloc_mem_err;
2658 }
2659
2660 return 0;
2661
2662/* handles low memory cases */
2663alloc_mem_err:
2664 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2665 index, ring_size);
 2666 /* FW will drop all packets if the queue is not big enough.
 2667 * In that case we disable the queue.
 2668 * The minimum size differs for TPA and non-TPA queues.
2669 */
2670 if (ring_size < (fp->disable_tpa ?
eb722d7a 2671 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
2672 /* release memory allocated for this queue */
2673 bnx2x_free_fp_mem_at(bp, index);
2674 return -ENOMEM;
2675 }
2676 return 0;
2677}
2678
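/*
 * Illustrative sketch, not part of the driver: how the Rx ring size is
 * chosen at the top of bnx2x_alloc_fp_mem_at() above - either the
 * user-requested size or an even share of the maximum, clamped up to the
 * firmware minimum for the queue type.  Names are hypothetical.
 */
static int sketch_rx_ring_size(int requested, int max_avail, int num_queues,
			       int fw_min)
{
	int size = requested ? requested : max_avail / num_queues;

	return size > fw_min ? size : fw_min;	/* never below the FW minimum */
}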
2679int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2680{
2681 int i;
2682
 2683 /*
2684 * 1. Allocate FP for leading - fatal if error
2685 * 2. {CNIC} Allocate FCoE FP - fatal if error
2686 * 3. Allocate RSS - fix number of queues if error
2687 */
2688
2689 /* leading */
2690 if (bnx2x_alloc_fp_mem_at(bp, 0))
2691 return -ENOMEM;
2692#ifdef BCM_CNIC
8eef2af1
DK
2693 if (!NO_FCOE(bp))
2694 /* FCoE */
2695 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
 2696 /* we will fail the load process instead of
 2697 * marking NO_FCOE_FLAG
2698 */
2699 return -ENOMEM;
b3b83c3f
DK
2700#endif
2701 /* RSS */
2702 for_each_nondefault_eth_queue(bp, i)
2703 if (bnx2x_alloc_fp_mem_at(bp, i))
2704 break;
2705
2706 /* handle memory failures */
2707 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2708 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2709
2710 WARN_ON(delta < 0);
2711#ifdef BCM_CNIC
 2712 /*
 2713 * move the non-eth FPs next to the last eth FP;
 2714 * this must be done in the following order:
 2715 * FCOE_IDX < FWD_IDX < OOO_IDX
2716 */
2717
2718 /* move FCoE fp */
2719 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2720#endif
2721 bp->num_queues -= delta;
2722 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2723 bp->num_queues + delta, bp->num_queues);
2724 }
2725
2726 return 0;
2727}
d6214d7a 2728
8d96286a 2729static int bnx2x_setup_irqs(struct bnx2x *bp)
d6214d7a
DK
2730{
2731 int rc = 0;
2732 if (bp->flags & USING_MSIX_FLAG) {
2733 rc = bnx2x_req_msix_irqs(bp);
2734 if (rc)
2735 return rc;
2736 } else {
2737 bnx2x_ack_int(bp);
2738 rc = bnx2x_req_irq(bp);
2739 if (rc) {
2740 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2741 return rc;
2742 }
2743 if (bp->flags & USING_MSI_FLAG) {
2744 bp->dev->irq = bp->pdev->irq;
2745 netdev_info(bp->dev, "using MSI IRQ %d\n",
2746 bp->pdev->irq);
2747 }
2748 }
2749
2750 return 0;
2751}
2752
523224a3
DK
2753void bnx2x_free_mem_bp(struct bnx2x *bp)
2754{
2755 kfree(bp->fp);
2756 kfree(bp->msix_table);
2757 kfree(bp->ilt);
2758}
2759
2760int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2761{
2762 struct bnx2x_fastpath *fp;
2763 struct msix_entry *tbl;
2764 struct bnx2x_ilt *ilt;
2765
2766 /* fp array */
2767 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2768 if (!fp)
2769 goto alloc_err;
2770 bp->fp = fp;
2771
2772 /* msix table */
ec6ba945 2773 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
523224a3
DK
2774 GFP_KERNEL);
2775 if (!tbl)
2776 goto alloc_err;
2777 bp->msix_table = tbl;
2778
2779 /* ilt */
2780 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2781 if (!ilt)
2782 goto alloc_err;
2783 bp->ilt = ilt;
2784
2785 return 0;
2786alloc_err:
2787 bnx2x_free_mem_bp(bp);
2788 return -ENOMEM;
2789
2790}
2791
66371c44
MM
2792static int bnx2x_reload_if_running(struct net_device *dev)
2793{
2794 struct bnx2x *bp = netdev_priv(dev);
2795
2796 if (unlikely(!netif_running(dev)))
2797 return 0;
2798
2799 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2800 return bnx2x_nic_load(bp, LOAD_NORMAL);
2801}
2802
1ac9e428
YR
2803int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
2804{
2805 u32 sel_phy_idx = 0;
2806 if (bp->link_params.num_phys <= 1)
2807 return INT_PHY;
2808
2809 if (bp->link_vars.link_up) {
2810 sel_phy_idx = EXT_PHY1;
2811 /* In case link is SERDES, check if the EXT_PHY2 is the one */
2812 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
2813 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
2814 sel_phy_idx = EXT_PHY2;
2815 } else {
2816
2817 switch (bnx2x_phy_selection(&bp->link_params)) {
2818 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2819 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
2820 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2821 sel_phy_idx = EXT_PHY1;
2822 break;
2823 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
2824 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2825 sel_phy_idx = EXT_PHY2;
2826 break;
2827 }
2828 }
2829
2830 return sel_phy_idx;
2831
2832}
2833int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
2834{
2835 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
2836 /*
 2837 * The selected active PHY is always determined after swapping (in case
 2838 * PHY swapping is enabled), so when swapping is enabled we need to
 2839 * reverse the configuration.
2840 */
2841
2842 if (bp->link_params.multi_phy_config &
2843 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
2844 if (sel_phy_idx == EXT_PHY1)
2845 sel_phy_idx = EXT_PHY2;
2846 else if (sel_phy_idx == EXT_PHY2)
2847 sel_phy_idx = EXT_PHY1;
2848 }
2849 return LINK_CONFIG_IDX(sel_phy_idx);
2850}
2851
9f6c9258
DK
2852/* called with rtnl_lock */
2853int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2854{
2855 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
2856
2857 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2858 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2859 return -EAGAIN;
2860 }
2861
2862 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2863 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2864 return -EINVAL;
2865
2866 /* This does not race with packet allocation
2867 * because the actual alloc size is
2868 * only updated as part of load
2869 */
2870 dev->mtu = new_mtu;
2871
66371c44
MM
2872 return bnx2x_reload_if_running(dev);
2873}
2874
2875u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2876{
2877 struct bnx2x *bp = netdev_priv(dev);
2878
2879 /* TPA requires Rx CSUM offloading */
2880 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2881 features &= ~NETIF_F_LRO;
2882
2883 return features;
2884}
2885
2886int bnx2x_set_features(struct net_device *dev, u32 features)
2887{
2888 struct bnx2x *bp = netdev_priv(dev);
2889 u32 flags = bp->flags;
538dd2e3 2890 bool bnx2x_reload = false;
66371c44
MM
2891
2892 if (features & NETIF_F_LRO)
2893 flags |= TPA_ENABLE_FLAG;
2894 else
2895 flags &= ~TPA_ENABLE_FLAG;
2896
538dd2e3
MB
2897 if (features & NETIF_F_LOOPBACK) {
2898 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2899 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2900 bnx2x_reload = true;
2901 }
2902 } else {
2903 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2904 bp->link_params.loopback_mode = LOOPBACK_NONE;
2905 bnx2x_reload = true;
2906 }
2907 }
2908
66371c44
MM
2909 if (flags ^ bp->flags) {
2910 bp->flags = flags;
538dd2e3
MB
2911 bnx2x_reload = true;
2912 }
66371c44 2913
538dd2e3 2914 if (bnx2x_reload) {
66371c44
MM
2915 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2916 return bnx2x_reload_if_running(dev);
2917 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
2918 }
2919
66371c44 2920 return 0;
9f6c9258
DK
2921}
2922
2923void bnx2x_tx_timeout(struct net_device *dev)
2924{
2925 struct bnx2x *bp = netdev_priv(dev);
2926
2927#ifdef BNX2X_STOP_ON_ERROR
2928 if (!bp->panic)
2929 bnx2x_panic();
2930#endif
2931 /* This allows the netif to be shutdown gracefully before resetting */
2932 schedule_delayed_work(&bp->reset_task, 0);
2933}
2934
9f6c9258
DK
2935int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2936{
2937 struct net_device *dev = pci_get_drvdata(pdev);
2938 struct bnx2x *bp;
2939
2940 if (!dev) {
2941 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2942 return -ENODEV;
2943 }
2944 bp = netdev_priv(dev);
2945
2946 rtnl_lock();
2947
2948 pci_save_state(pdev);
2949
2950 if (!netif_running(dev)) {
2951 rtnl_unlock();
2952 return 0;
2953 }
2954
2955 netif_device_detach(dev);
2956
2957 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2958
2959 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2960
2961 rtnl_unlock();
2962
2963 return 0;
2964}
2965
2966int bnx2x_resume(struct pci_dev *pdev)
2967{
2968 struct net_device *dev = pci_get_drvdata(pdev);
2969 struct bnx2x *bp;
2970 int rc;
2971
2972 if (!dev) {
2973 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2974 return -ENODEV;
2975 }
2976 bp = netdev_priv(dev);
2977
2978 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2979 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2980 return -EAGAIN;
2981 }
2982
2983 rtnl_lock();
2984
2985 pci_restore_state(pdev);
2986
2987 if (!netif_running(dev)) {
2988 rtnl_unlock();
2989 return 0;
2990 }
2991
2992 bnx2x_set_power_state(bp, PCI_D0);
2993 netif_device_attach(dev);
2994
f2e0899f
DK
2995 /* Since the chip was reset, clear the FW sequence number */
2996 bp->fw_seq = 0;
9f6c9258
DK
2997 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2998
2999 rtnl_unlock();
3000
3001 return rc;
3002}