net: remove interrupt.h inclusion from netdevice.h
[linux-2.6-block.git] / drivers / net / bnx2x / bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/interrupt.h>
21#include <linux/ip.h>
22#include <net/ipv6.h>
23#include <net/ip6_checksum.h>
24#include <linux/firmware.h>
25#include <linux/prefetch.h>
26#include "bnx2x_cmn.h"
27
28#include "bnx2x_init.h"
29
30static int bnx2x_setup_irqs(struct bnx2x *bp);
31
32/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
38 * Makes sure the contents of bp->fp[index].napi are kept
39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
50}
51
52/**
53 * bnx2x_move_fp - move content of the fastpath structure.
54 *
55 * @bp: driver handle
56 * @from: source FP index
57 * @to: destination FP index
58 *
59 * Makes sure the contents of bp->fp[to].napi are kept
60 * intact.
61 */
62static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63{
64 struct bnx2x_fastpath *from_fp = &bp->fp[from];
65 struct bnx2x_fastpath *to_fp = &bp->fp[to];
66 struct napi_struct orig_napi = to_fp->napi;
67 /* Move bnx2x_fastpath contents */
68 memcpy(to_fp, from_fp, sizeof(*to_fp));
69 to_fp->index = to;
70
71 /* Restore the NAPI object as it has been already initialized */
72 to_fp->napi = orig_napi;
73}
74
75/* free skb in the packet ring at pos idx
76 * return idx of last bd freed
77 */
78static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
79 u16 idx)
80{
81 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
82 struct eth_tx_start_bd *tx_start_bd;
83 struct eth_tx_bd *tx_data_bd;
84 struct sk_buff *skb = tx_buf->skb;
85 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
86 int nbd;
87
88 /* prefetch skb end pointer to speedup dev_kfree_skb() */
89 prefetch(&skb->end);
90
91 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
92 idx, tx_buf, skb);
93
94 /* unmap first bd */
95 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
96 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
97 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
98 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
99
100 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
101#ifdef BNX2X_STOP_ON_ERROR
102 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
103 BNX2X_ERR("BAD nbd!\n");
104 bnx2x_panic();
105 }
106#endif
107 new_cons = nbd + tx_buf->first_bd;
108
109 /* Get the next bd */
110 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
111
112 /* Skip a parse bd... */
113 --nbd;
114 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
115
116 /* ...and the TSO split header bd since they have no mapping */
117 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
118 --nbd;
119 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
120 }
121
122 /* now free frags */
123 while (nbd > 0) {
124
125 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
126 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
127 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
128 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
129 if (--nbd)
130 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
131 }
132
133 /* release skb */
134 WARN_ON(!skb);
135 dev_kfree_skb_any(skb);
136 tx_buf->first_bd = 0;
137 tx_buf->skb = NULL;
138
139 return new_cons;
140}
141
142int bnx2x_tx_int(struct bnx2x_fastpath *fp)
143{
144 struct bnx2x *bp = fp->bp;
145 struct netdev_queue *txq;
146 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
147
148#ifdef BNX2X_STOP_ON_ERROR
149 if (unlikely(bp->panic))
150 return -1;
151#endif
152
153 txq = netdev_get_tx_queue(bp->dev, fp->index);
154 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
155 sw_cons = fp->tx_pkt_cons;
156
157 while (sw_cons != hw_cons) {
158 u16 pkt_cons;
159
160 pkt_cons = TX_BD(sw_cons);
161
162 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
163 " pkt_cons %u\n",
164 fp->index, hw_cons, sw_cons, pkt_cons);
165
166 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
167 sw_cons++;
168 }
169
170 fp->tx_pkt_cons = sw_cons;
171 fp->tx_bd_cons = bd_cons;
172
173 /* Need to make the tx_bd_cons update visible to start_xmit()
174 * before checking for netif_tx_queue_stopped(). Without the
175 * memory barrier, there is a small possibility that
176 * start_xmit() will miss it and cause the queue to be stopped
177 * forever.
178 */
179 smp_mb();
180
181 if (unlikely(netif_tx_queue_stopped(txq))) {
182 /* Taking tx_lock() is needed to prevent reenabling the queue
183 * while it's empty. This could have happened if rx_action() gets
184 * suspended in bnx2x_tx_int() after the condition before
185 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
186 *
187 * stops the queue->sees fresh tx_bd_cons->releases the queue->
188 * sends some packets consuming the whole queue again->
189 * stops the queue
190 */
191
192 __netif_tx_lock(txq, smp_processor_id());
193
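 /* Wake the queue only while the device is open and there is room
  * for at least one maximally-fragmented packet (MAX_SKB_FRAGS + 3
  * BDs), so the queue is not stopped again immediately.
  */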
194 if ((netif_tx_queue_stopped(txq)) &&
195 (bp->state == BNX2X_STATE_OPEN) &&
196 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
197 netif_tx_wake_queue(txq);
198
199 __netif_tx_unlock(txq);
200 }
201 return 0;
202}
203
204static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
205 u16 idx)
206{
207 u16 last_max = fp->last_max_sge;
208
209 if (SUB_S16(idx, last_max) > 0)
210 fp->last_max_sge = idx;
211}
212
213static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
214 struct eth_fast_path_rx_cqe *fp_cqe)
215{
216 struct bnx2x *bp = fp->bp;
217 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
218 le16_to_cpu(fp_cqe->len_on_bd)) >>
219 SGE_PAGE_SHIFT;
220 u16 last_max, last_elem, first_elem;
221 u16 delta = 0;
222 u16 i;
223
224 if (!sge_len)
225 return;
226
227 /* First mark all used pages */
228 for (i = 0; i < sge_len; i++)
229 SGE_MASK_CLEAR_BIT(fp,
230 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
231
232 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
233 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234
235 /* Here we assume that the last SGE index is the biggest */
236 prefetch((void *)(fp->sge_mask));
237 bnx2x_update_last_max_sge(fp,
238 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
239
240 last_max = RX_SGE(fp->last_max_sge);
241 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
242 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
243
244 /* If ring is not full */
245 if (last_elem + 1 != first_elem)
246 last_elem++;
247
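 /* Walk the SGE mask from the current producer element towards the
  * last completed SGE; every element that is now fully consumed (all
  * bits cleared) is re-armed and the producer advanced over it.
  */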
248 /* Now update the prod */
249 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
250 if (likely(fp->sge_mask[i]))
251 break;
252
253 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
254 delta += RX_SGE_MASK_ELEM_SZ;
255 }
256
257 if (delta > 0) {
258 fp->rx_sge_prod += delta;
259 /* clear page-end entries */
260 bnx2x_clear_sge_mask_next_elems(fp);
261 }
262
263 DP(NETIF_MSG_RX_STATUS,
264 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
265 fp->last_max_sge, fp->rx_sge_prod);
266}
267
268static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
269 struct sk_buff *skb, u16 cons, u16 prod)
270{
271 struct bnx2x *bp = fp->bp;
272 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
273 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
274 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
275 dma_addr_t mapping;
276
277 /* move empty skb from pool to prod and map it */
278 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
279 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
280 fp->rx_buf_size, DMA_FROM_DEVICE);
281 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
282
283 /* move partial skb from cons to pool (don't unmap yet) */
284 fp->tpa_pool[queue] = *cons_rx_buf;
285
286 /* mark bin state as start - print error if current state != stop */
287 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
288 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290 fp->tpa_state[queue] = BNX2X_TPA_START;
291
292 /* point prod_bd to new skb */
293 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
294 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
295
296#ifdef BNX2X_STOP_ON_ERROR
297 fp->tpa_queue_used |= (1 << queue);
298#ifdef _ASM_GENERIC_INT_L64_H
299 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
300#else
301 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
302#endif
303 fp->tpa_queue_used);
304#endif
305}
306
307/* Timestamp option length allowed for TPA aggregation:
308 *
309 * nop nop kind length echo val
310 */
311#define TPA_TSTAMP_OPT_LEN 12
312/**
313 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
314 *
315 * @bp: driver handle
316 * @parsing_flags: parsing flags from the START CQE
317 * @len_on_bd: total length of the first packet for the
318 * aggregation.
319 *
320 * Returns the approximate value of the MSS for this aggregation,
321 * calculated using its first packet.
322 */
323static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
324 u16 len_on_bd)
325{
326 /* A TPA aggregation won't have IP options, or TCP options
327 * other than timestamp.
328 */
329 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
330
331
332 /* Check if there was a TCP timestamp; if there is, it will
333 * always be 12 bytes long: nop nop kind length echo val.
334 *
335 * Otherwise FW would close the aggregation.
336 */
337 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
338 hdrs_len += TPA_TSTAMP_OPT_LEN;
339
340 return len_on_bd - hdrs_len;
341}
342
343static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344 struct sk_buff *skb,
345 struct eth_fast_path_rx_cqe *fp_cqe,
346 u16 cqe_idx, u16 parsing_flags)
347{
348 struct sw_rx_page *rx_pg, old_rx_pg;
349 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
350 u32 i, frag_len, frag_size, pages;
351 int err;
352 int j;
353
354 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
355 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
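 /* frag_size is the payload carried by the SGE pages (total packet
  * length minus what is already on the first BD); pages is the number
  * of SGE entries needed to cover it.
  */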
356
357 /* This is needed in order to enable forwarding support */
358 if (frag_size)
359 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
360 len_on_bd);
361
362#ifdef BNX2X_STOP_ON_ERROR
363 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
364 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
365 pages, cqe_idx);
366 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
367 fp_cqe->pkt_len, len_on_bd);
368 bnx2x_panic();
369 return -EINVAL;
370 }
371#endif
372
373 /* Run through the SGL and compose the fragmented skb */
374 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
375 u16 sge_idx =
376 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
377
378 /* FW gives the indices of the SGE as if the ring is an array
379 (meaning that "next" element will consume 2 indices) */
380 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
381 rx_pg = &fp->rx_page_ring[sge_idx];
382 old_rx_pg = *rx_pg;
383
384 /* If we fail to allocate a substitute page, we simply stop
385 where we are and drop the whole packet */
386 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
387 if (unlikely(err)) {
388 fp->eth_q_stats.rx_skb_alloc_failed++;
389 return err;
390 }
391
392 /* Unmap the page as we are going to pass it to the stack */
393 dma_unmap_page(&bp->pdev->dev,
394 dma_unmap_addr(&old_rx_pg, mapping),
395 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
396
397 /* Add one frag and update the appropriate fields in the skb */
398 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
399
400 skb->data_len += frag_len;
401 skb->truesize += frag_len;
402 skb->len += frag_len;
403
404 frag_size -= frag_len;
405 }
406
407 return 0;
408}
409
410static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
411 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
412 u16 cqe_idx)
413{
414 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
415 struct sk_buff *skb = rx_buf->skb;
416 /* alloc new skb */
417 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
418
419 /* Unmap skb in the pool anyway, as we are going to change
420 pool entry status to BNX2X_TPA_STOP even if new skb allocation
421 fails. */
422 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
423 fp->rx_buf_size, DMA_FROM_DEVICE);
424
425 if (likely(new_skb)) {
426 /* fix ip xsum and give it to the stack */
427 /* (no need to map the new skb) */
428 u16 parsing_flags =
429 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
430
431 prefetch(skb);
432 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
433
434#ifdef BNX2X_STOP_ON_ERROR
435 if (pad + len > fp->rx_buf_size) {
436 BNX2X_ERR("skb_put is about to fail... "
437 "pad %d len %d rx_buf_size %d\n",
438 pad, len, fp->rx_buf_size);
439 bnx2x_panic();
440 return;
441 }
442#endif
443
444 skb_reserve(skb, pad);
445 skb_put(skb, len);
446
447 skb->protocol = eth_type_trans(skb, bp->dev);
448 skb->ip_summed = CHECKSUM_UNNECESSARY;
449
450 {
451 struct iphdr *iph;
452
453 iph = (struct iphdr *)skb->data;
454 iph->check = 0;
455 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
456 }
457
458 if (!bnx2x_fill_frag_skb(bp, fp, skb,
459 &cqe->fast_path_cqe, cqe_idx,
460 parsing_flags)) {
461 if (parsing_flags & PARSING_FLAGS_VLAN)
462 __vlan_hwaccel_put_tag(skb,
463 le16_to_cpu(cqe->fast_path_cqe.
464 vlan_tag));
465 napi_gro_receive(&fp->napi, skb);
466 } else {
467 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
468 " - dropping packet!\n");
469 dev_kfree_skb_any(skb);
470 }
471
472
473 /* put new skb in bin */
474 fp->tpa_pool[queue].skb = new_skb;
475
476 } else {
477 /* else drop the packet and keep the buffer in the bin */
478 DP(NETIF_MSG_RX_STATUS,
479 "Failed to allocate new skb - dropping packet!\n");
480 fp->eth_q_stats.rx_skb_alloc_failed++;
481 }
482
483 fp->tpa_state[queue] = BNX2X_TPA_STOP;
484}
485
486/* Set Toeplitz hash value in the skb using the value from the
487 * CQE (calculated by HW).
488 */
489static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
490 struct sk_buff *skb)
491{
492 /* Set Toeplitz hash from CQE */
493 if ((bp->dev->features & NETIF_F_RXHASH) &&
494 (cqe->fast_path_cqe.status_flags &
495 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
496 skb->rxhash =
497 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
498}
499
500int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
501{
502 struct bnx2x *bp = fp->bp;
503 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
504 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
505 int rx_pkt = 0;
506
507#ifdef BNX2X_STOP_ON_ERROR
508 if (unlikely(bp->panic))
509 return 0;
510#endif
511
512 /* CQ "next element" is of the size of the regular element,
513 that's why it's ok here */
514 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
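 /* The last entry of each RCQ page is a "next page" element, not a
  * real completion, so step over it when the consumer lands on it.
  */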
515 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
516 hw_comp_cons++;
517
518 bd_cons = fp->rx_bd_cons;
519 bd_prod = fp->rx_bd_prod;
520 bd_prod_fw = bd_prod;
521 sw_comp_cons = fp->rx_comp_cons;
522 sw_comp_prod = fp->rx_comp_prod;
523
524 /* Memory barrier necessary as speculative reads of the rx
525 * buffer can be ahead of the index in the status block
526 */
527 rmb();
528
529 DP(NETIF_MSG_RX_STATUS,
530 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
531 fp->index, hw_comp_cons, sw_comp_cons);
532
533 while (sw_comp_cons != hw_comp_cons) {
534 struct sw_rx_bd *rx_buf = NULL;
535 struct sk_buff *skb;
536 union eth_rx_cqe *cqe;
537 u8 cqe_fp_flags;
538 u16 len, pad;
539
540 comp_ring_cons = RCQ_BD(sw_comp_cons);
541 bd_prod = RX_BD(bd_prod);
542 bd_cons = RX_BD(bd_cons);
543
544 /* Prefetch the page containing the BD descriptor
545 at producer's index. It will be needed when new skb is
546 allocated */
547 prefetch((void *)(PAGE_ALIGN((unsigned long)
548 (&fp->rx_desc_ring[bd_prod])) -
549 PAGE_SIZE + 1));
550
551 cqe = &fp->rx_comp_ring[comp_ring_cons];
552 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
553
554 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
555 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
556 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
557 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
558 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
559 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
560
561 /* is this a slowpath msg? */
562 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
563 bnx2x_sp_event(fp, cqe);
564 goto next_cqe;
565
566 /* this is an rx packet */
567 } else {
568 rx_buf = &fp->rx_buf_ring[bd_cons];
569 skb = rx_buf->skb;
570 prefetch(skb);
571 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
572 pad = cqe->fast_path_cqe.placement_offset;
573
574 /* - If CQE is marked both TPA_START and TPA_END it is
575 * a non-TPA CQE.
576 * - FP CQE will always have either TPA_START or/and
577 * TPA_STOP flags set.
578 */
579 if ((!fp->disable_tpa) &&
580 (TPA_TYPE(cqe_fp_flags) !=
581 (TPA_TYPE_START | TPA_TYPE_END))) {
582 u16 queue = cqe->fast_path_cqe.queue_index;
583
584 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
585 DP(NETIF_MSG_RX_STATUS,
586 "calling tpa_start on queue %d\n",
587 queue);
588
589 bnx2x_tpa_start(fp, queue, skb,
590 bd_cons, bd_prod);
591
592 /* Set Toeplitz hash for an LRO skb */
593 bnx2x_set_skb_rxhash(bp, cqe, skb);
594
595 goto next_rx;
596 } else { /* TPA_STOP */
597 DP(NETIF_MSG_RX_STATUS,
598 "calling tpa_stop on queue %d\n",
599 queue);
600
601 if (!BNX2X_RX_SUM_FIX(cqe))
602 BNX2X_ERR("STOP on none TCP "
603 "data\n");
604
605 /* This is a size of the linear data
606 on this skb */
607 len = le16_to_cpu(cqe->fast_path_cqe.
608 len_on_bd);
609 bnx2x_tpa_stop(bp, fp, queue, pad,
610 len, cqe, comp_ring_cons);
611#ifdef BNX2X_STOP_ON_ERROR
612 if (bp->panic)
613 return 0;
614#endif
615
616 bnx2x_update_sge_prod(fp,
617 &cqe->fast_path_cqe);
618 goto next_cqe;
619 }
620 }
621
622 dma_sync_single_for_device(&bp->pdev->dev,
623 dma_unmap_addr(rx_buf, mapping),
624 pad + RX_COPY_THRESH,
625 DMA_FROM_DEVICE);
626 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
627
628 /* is this an error packet? */
629 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
630 DP(NETIF_MSG_RX_ERR,
631 "ERROR flags %x rx packet %u\n",
632 cqe_fp_flags, sw_comp_cons);
633 fp->eth_q_stats.rx_err_discard_pkt++;
634 goto reuse_rx;
635 }
636
637 /* Since we don't have a jumbo ring
638 * copy small packets if mtu > 1500
639 */
640 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
641 (len <= RX_COPY_THRESH)) {
642 struct sk_buff *new_skb;
643
644 new_skb = netdev_alloc_skb(bp->dev,
645 len + pad);
646 if (new_skb == NULL) {
647 DP(NETIF_MSG_RX_ERR,
648 "ERROR packet dropped "
649 "because of alloc failure\n");
650 fp->eth_q_stats.rx_skb_alloc_failed++;
651 goto reuse_rx;
652 }
653
654 /* aligned copy */
655 skb_copy_from_linear_data_offset(skb, pad,
656 new_skb->data + pad, len);
657 skb_reserve(new_skb, pad);
658 skb_put(new_skb, len);
659
660 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
661
662 skb = new_skb;
663
664 } else
665 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
666 dma_unmap_single(&bp->pdev->dev,
667 dma_unmap_addr(rx_buf, mapping),
668 fp->rx_buf_size,
669 DMA_FROM_DEVICE);
670 skb_reserve(skb, pad);
671 skb_put(skb, len);
672
673 } else {
674 DP(NETIF_MSG_RX_ERR,
675 "ERROR packet dropped because "
676 "of alloc failure\n");
677 fp->eth_q_stats.rx_skb_alloc_failed++;
678reuse_rx:
679 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
680 goto next_rx;
681 }
682
683 skb->protocol = eth_type_trans(skb, bp->dev);
684
685 /* Set Toeplitz hash for a non-LRO skb */
686 bnx2x_set_skb_rxhash(bp, cqe, skb);
687
688 skb_checksum_none_assert(skb);
689
690 if (bp->dev->features & NETIF_F_RXCSUM) {
691 if (likely(BNX2X_RX_CSUM_OK(cqe)))
692 skb->ip_summed = CHECKSUM_UNNECESSARY;
693 else
694 fp->eth_q_stats.hw_csum_err++;
695 }
696 }
697
698 skb_record_rx_queue(skb, fp->index);
699
700 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
701 PARSING_FLAGS_VLAN)
702 __vlan_hwaccel_put_tag(skb,
703 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
704 napi_gro_receive(&fp->napi, skb);
705
706
707next_rx:
708 rx_buf->skb = NULL;
709
710 bd_cons = NEXT_RX_IDX(bd_cons);
711 bd_prod = NEXT_RX_IDX(bd_prod);
712 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
713 rx_pkt++;
714next_cqe:
715 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
716 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
717
718 if (rx_pkt == budget)
719 break;
720 } /* while */
721
722 fp->rx_bd_cons = bd_cons;
723 fp->rx_bd_prod = bd_prod_fw;
724 fp->rx_comp_cons = sw_comp_cons;
725 fp->rx_comp_prod = sw_comp_prod;
726
727 /* Update producers */
728 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
729 fp->rx_sge_prod);
730
731 fp->rx_pkt += rx_pkt;
732 fp->rx_calls++;
733
734 return rx_pkt;
735}
736
737static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
738{
739 struct bnx2x_fastpath *fp = fp_cookie;
740 struct bnx2x *bp = fp->bp;
741
742 /* Return here if interrupt is disabled */
743 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
744 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
745 return IRQ_HANDLED;
746 }
747
748 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
749 "[fp %d fw_sd %d igusb %d]\n",
750 fp->index, fp->fw_sb_id, fp->igu_sb_id);
751 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
752
753#ifdef BNX2X_STOP_ON_ERROR
754 if (unlikely(bp->panic))
755 return IRQ_HANDLED;
756#endif
757
758 /* Handle Rx and Tx according to MSI-X vector */
759 prefetch(fp->rx_cons_sb);
760 prefetch(fp->tx_cons_sb);
761 prefetch(&fp->sb_running_index[SM_RX_ID]);
762 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
763
764 return IRQ_HANDLED;
765}
766
767/* HW Lock for shared dual port PHYs */
768void bnx2x_acquire_phy_lock(struct bnx2x *bp)
769{
770 mutex_lock(&bp->port.phy_mutex);
771
772 if (bp->port.need_hw_lock)
773 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
774}
775
776void bnx2x_release_phy_lock(struct bnx2x *bp)
777{
778 if (bp->port.need_hw_lock)
779 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
780
781 mutex_unlock(&bp->port.phy_mutex);
782}
783
784/* calculates MF speed according to current linespeed and MF configuration */
785u16 bnx2x_get_mf_speed(struct bnx2x *bp)
786{
787 u16 line_speed = bp->link_vars.line_speed;
788 if (IS_MF(bp)) {
789 u16 maxCfg = bnx2x_extract_max_cfg(bp,
790 bp->mf_config[BP_VN(bp)]);
791
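 /* maxCfg comes from the MF configuration: in SI mode it is a
  * percentage of the physical line speed, in SD mode it is the
  * maximum rate in units of 100 Mbps (see below).
  */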
792 /* Calculate the current MAX line speed limit for the MF
793 * devices
794 */
795 if (IS_MF_SI(bp))
796 line_speed = (line_speed * maxCfg) / 100;
797 else { /* SD mode */
798 u16 vn_max_rate = maxCfg * 100;
799
800 if (vn_max_rate < line_speed)
801 line_speed = vn_max_rate;
802 }
803 }
804
805 return line_speed;
806}
807
808/**
809 * bnx2x_fill_report_data - fill link report data to report
810 *
811 * @bp: driver handle
812 * @data: link state to update
813 *
814 * It uses non-atomic bit operations because it is called under the mutex.
815 */
816static inline void bnx2x_fill_report_data(struct bnx2x *bp,
817 struct bnx2x_link_report_data *data)
818{
819 u16 line_speed = bnx2x_get_mf_speed(bp);
820
821 memset(data, 0, sizeof(*data));
822
823 /* Fill the report data: effective line speed */
824 data->line_speed = line_speed;
825
826 /* Link is down */
827 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
828 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
829 &data->link_report_flags);
830
831 /* Full DUPLEX */
832 if (bp->link_vars.duplex == DUPLEX_FULL)
833 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
834
835 /* Rx Flow Control is ON */
836 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
837 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
838
839 /* Tx Flow Control is ON */
840 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
841 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
842}
843
844/**
845 * bnx2x_link_report - report link status to OS.
846 *
847 * @bp: driver handle
848 *
849 * Calls the __bnx2x_link_report() under the same locking scheme
850 * as a link/PHY state managing code to ensure a consistent link
851 * reporting.
852 */
853
854void bnx2x_link_report(struct bnx2x *bp)
855{
856 bnx2x_acquire_phy_lock(bp);
857 __bnx2x_link_report(bp);
858 bnx2x_release_phy_lock(bp);
859}
860
861/**
862 * __bnx2x_link_report - report link status to OS.
863 *
864 * @bp: driver handle
865 *
866 * Non-atomic implementation.
867 * Should be called under the phy_lock.
868 */
869void __bnx2x_link_report(struct bnx2x *bp)
870{
871 struct bnx2x_link_report_data cur_data;
872
873 /* reread mf_cfg */
874 if (!CHIP_IS_E1(bp))
875 bnx2x_read_mf_cfg(bp);
876
877 /* Read the current link report info */
878 bnx2x_fill_report_data(bp, &cur_data);
879
880 /* Don't report link down or exactly the same link status twice */
881 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
882 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
883 &bp->last_reported_link.link_report_flags) &&
884 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
885 &cur_data.link_report_flags)))
886 return;
887
888 bp->link_cnt++;
889
890 /* We are going to report new link parameters now -
891 * remember the current data for the next time.
892 */
893 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
894
895 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
896 &cur_data.link_report_flags)) {
897 netif_carrier_off(bp->dev);
898 netdev_err(bp->dev, "NIC Link is Down\n");
899 return;
900 } else {
901 netif_carrier_on(bp->dev);
902 netdev_info(bp->dev, "NIC Link is Up, ");
903 pr_cont("%d Mbps ", cur_data.line_speed);
904
905 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
906 &cur_data.link_report_flags))
907 pr_cont("full duplex");
908 else
909 pr_cont("half duplex");
910
911 /* Handle the FC at the end so that only these flags would be
912 * possibly set. This way we may easily check if there is no FC
913 * enabled.
914 */
915 if (cur_data.link_report_flags) {
916 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
917 &cur_data.link_report_flags)) {
918 pr_cont(", receive ");
919 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
920 &cur_data.link_report_flags))
921 pr_cont("& transmit ");
922 } else {
923 pr_cont(", transmit ");
924 }
925 pr_cont("flow control ON");
926 }
927 pr_cont("\n");
928 }
929}
930
931void bnx2x_init_rx_rings(struct bnx2x *bp)
932{
933 int func = BP_FUNC(bp);
934 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
935 ETH_MAX_AGGREGATION_QUEUES_E1H;
936 u16 ring_prod;
937 int i, j;
938
939 /* Allocate TPA resources */
940 for_each_rx_queue(bp, j) {
941 struct bnx2x_fastpath *fp = &bp->fp[j];
942
943 DP(NETIF_MSG_IFUP,
944 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
945
946 if (!fp->disable_tpa) {
947 /* Fill the per-aggregation pool */
948 for (i = 0; i < max_agg_queues; i++) {
949 fp->tpa_pool[i].skb =
950 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
951 if (!fp->tpa_pool[i].skb) {
952 BNX2X_ERR("Failed to allocate TPA "
953 "skb pool for queue[%d] - "
954 "disabling TPA on this "
955 "queue!\n", j);
956 bnx2x_free_tpa_pool(bp, fp, i);
957 fp->disable_tpa = 1;
958 break;
959 }
960 dma_unmap_addr_set((struct sw_rx_bd *)
961 &bp->fp->tpa_pool[i],
962 mapping, 0);
963 fp->tpa_state[i] = BNX2X_TPA_STOP;
964 }
965
966 /* "next page" elements initialization */
967 bnx2x_set_next_page_sgl(fp);
968
969 /* set SGEs bit mask */
970 bnx2x_init_sge_ring_bit_mask(fp);
971
972 /* Allocate SGEs and initialize the ring elements */
973 for (i = 0, ring_prod = 0;
974 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
975
976 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
977 BNX2X_ERR("was only able to allocate "
978 "%d rx sges\n", i);
979 BNX2X_ERR("disabling TPA for"
980 " queue[%d]\n", j);
981 /* Cleanup already allocated elements */
982 bnx2x_free_rx_sge_range(bp,
983 fp, ring_prod);
984 bnx2x_free_tpa_pool(bp,
985 fp, max_agg_queues);
986 fp->disable_tpa = 1;
987 ring_prod = 0;
988 break;
989 }
990 ring_prod = NEXT_SGE_IDX(ring_prod);
991 }
992
993 fp->rx_sge_prod = ring_prod;
994 }
995 }
996
997 for_each_rx_queue(bp, j) {
998 struct bnx2x_fastpath *fp = &bp->fp[j];
999
1000 fp->rx_bd_cons = 0;
1001
1002 /* Activate BD ring */
1003 /* Warning!
1004 * this will generate an interrupt (to the TSTORM)
1005 * must only be done after chip is initialized
1006 */
1007 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1008 fp->rx_sge_prod);
1009
1010 if (j != 0)
1011 continue;
1012
1013 if (!CHIP_IS_E2(bp)) {
1014 REG_WR(bp, BAR_USTRORM_INTMEM +
1015 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1016 U64_LO(fp->rx_comp_mapping));
1017 REG_WR(bp, BAR_USTRORM_INTMEM +
1018 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1019 U64_HI(fp->rx_comp_mapping));
1020 }
1021 }
1022}
1023
1024static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1025{
1026 int i;
1027
1028 for_each_tx_queue(bp, i) {
1029 struct bnx2x_fastpath *fp = &bp->fp[i];
1030
1031 u16 bd_cons = fp->tx_bd_cons;
1032 u16 sw_prod = fp->tx_pkt_prod;
1033 u16 sw_cons = fp->tx_pkt_cons;
1034
1035 while (sw_cons != sw_prod) {
1036 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1037 sw_cons++;
1038 }
1039 }
1040}
1041
1042static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1043{
1044 struct bnx2x *bp = fp->bp;
1045 int i;
1046
1047 /* ring wasn't allocated */
1048 if (fp->rx_buf_ring == NULL)
1049 return;
1050
1051 for (i = 0; i < NUM_RX_BD; i++) {
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1053 struct sk_buff *skb = rx_buf->skb;
1054
1055 if (skb == NULL)
1056 continue;
1057
1058 dma_unmap_single(&bp->pdev->dev,
1059 dma_unmap_addr(rx_buf, mapping),
1060 fp->rx_buf_size, DMA_FROM_DEVICE);
1061
1062 rx_buf->skb = NULL;
1063 dev_kfree_skb(skb);
1064 }
1065}
1066
1067static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1068{
1069 int j;
1070
1071 for_each_rx_queue(bp, j) {
1072 struct bnx2x_fastpath *fp = &bp->fp[j];
1073
1074 bnx2x_free_rx_bds(fp);
1075
1076 if (!fp->disable_tpa)
1077 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1078 ETH_MAX_AGGREGATION_QUEUES_E1 :
1079 ETH_MAX_AGGREGATION_QUEUES_E1H);
1080 }
1081}
1082
1083void bnx2x_free_skbs(struct bnx2x *bp)
1084{
1085 bnx2x_free_tx_skbs(bp);
1086 bnx2x_free_rx_skbs(bp);
1087}
1088
1089void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1090{
1091 /* load old values */
1092 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1093
1094 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1095 /* leave all but MAX value */
1096 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1097
1098 /* set new MAX value */
1099 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1100 & FUNC_MF_CFG_MAX_BW_MASK;
1101
1102 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1103 }
1104}
1105
1106static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1107{
1108 int i, offset = 1;
1109
1110 free_irq(bp->msix_table[0].vector, bp->dev);
1111 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1112 bp->msix_table[0].vector);
1113
1114#ifdef BCM_CNIC
1115 offset++;
1116#endif
1117 for_each_eth_queue(bp, i) {
1118 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
1119 "state %x\n", i, bp->msix_table[i + offset].vector,
1120 bnx2x_fp(bp, i, state));
1121
1122 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1123 }
1124}
1125
d6214d7a 1126void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1127{
d6214d7a
DK
1128 if (bp->flags & USING_MSIX_FLAG)
1129 bnx2x_free_msix_irqs(bp);
1130 else if (bp->flags & USING_MSI_FLAG)
1131 free_irq(bp->pdev->irq, bp->dev);
1132 else
1133 free_irq(bp->pdev->irq, bp->dev);
1134}
1135
d6214d7a 1136int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1137{
d6214d7a 1138 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1139
d6214d7a
DK
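 /* MSI-X table layout: entry 0 is the slowpath vector, followed by an
  * optional CNIC entry, then one entry per ethernet fastpath queue.
  */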
1140 bp->msix_table[msix_vec].entry = msix_vec;
1141 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1142 bp->msix_table[0].entry);
1143 msix_vec++;
1144
1145#ifdef BCM_CNIC
1146 bp->msix_table[msix_vec].entry = msix_vec;
1147 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1148 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1149 msix_vec++;
1150#endif
1151 for_each_eth_queue(bp, i) {
1152 bp->msix_table[msix_vec].entry = msix_vec;
1153 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1154 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1155 msix_vec++;
1156 }
1157
1158 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1159
1160 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1161
1162 /*
1163 * reconfigure number of tx/rx queues according to available
1164 * MSI-X vectors
1165 */
1166 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1167 /* how many fewer vectors will we have? */
1168 int diff = req_cnt - rc;
1169
1170 DP(NETIF_MSG_IFUP,
1171 "Trying to use less MSI-X vectors: %d\n", rc);
1172
1173 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1174
1175 if (rc) {
1176 DP(NETIF_MSG_IFUP,
1177 "MSI-X is not attainable rc %d\n", rc);
1178 return rc;
1179 }
1180 /*
1181 * decrease number of queues by number of unallocated entries
1182 */
1183 bp->num_queues -= diff;
1184
1185 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1186 bp->num_queues);
1187 } else if (rc) {
1188 /* fall to INTx if not enough memory */
1189 if (rc == -ENOMEM)
1190 bp->flags |= DISABLE_MSI_FLAG;
1191 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1192 return rc;
1193 }
1194
1195 bp->flags |= USING_MSIX_FLAG;
1196
1197 return 0;
1198}
1199
1200static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1201{
1202 int i, rc, offset = 1;
1203
1204 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1205 bp->dev->name, bp->dev);
1206 if (rc) {
1207 BNX2X_ERR("request sp irq failed\n");
1208 return -EBUSY;
1209 }
1210
1211#ifdef BCM_CNIC
1212 offset++;
1213#endif
1214 for_each_eth_queue(bp, i) {
1215 struct bnx2x_fastpath *fp = &bp->fp[i];
1216 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1217 bp->dev->name, i);
1218
1219 rc = request_irq(bp->msix_table[offset].vector,
1220 bnx2x_msix_fp_int, 0, fp->name, fp);
1221 if (rc) {
1222 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1223 bnx2x_free_msix_irqs(bp);
1224 return -EBUSY;
1225 }
1226
1227 offset++;
1228 fp->state = BNX2X_FP_STATE_IRQ;
1229 }
1230
1231 i = BNX2X_NUM_ETH_QUEUES(bp);
1232 offset = 1 + CNIC_CONTEXT_USE;
1233 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1234 " ... fp[%d] %d\n",
1235 bp->msix_table[0].vector,
1236 0, bp->msix_table[offset].vector,
1237 i - 1, bp->msix_table[offset + i - 1].vector);
1238
1239 return 0;
1240}
1241
d6214d7a 1242int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1243{
1244 int rc;
1245
1246 rc = pci_enable_msi(bp->pdev);
1247 if (rc) {
1248 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1249 return -1;
1250 }
1251 bp->flags |= USING_MSI_FLAG;
1252
1253 return 0;
1254}
1255
1256static int bnx2x_req_irq(struct bnx2x *bp)
1257{
1258 unsigned long flags;
1259 int rc;
1260
1261 if (bp->flags & USING_MSI_FLAG)
1262 flags = 0;
1263 else
1264 flags = IRQF_SHARED;
1265
1266 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1267 bp->dev->name, bp->dev);
1268 if (!rc)
1269 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1270
1271 return rc;
1272}
1273
1274static void bnx2x_napi_enable(struct bnx2x *bp)
1275{
1276 int i;
1277
1278 for_each_napi_queue(bp, i)
1279 napi_enable(&bnx2x_fp(bp, i, napi));
1280}
1281
1282static void bnx2x_napi_disable(struct bnx2x *bp)
1283{
1284 int i;
1285
1286 for_each_napi_queue(bp, i)
1287 napi_disable(&bnx2x_fp(bp, i, napi));
1288}
1289
1290void bnx2x_netif_start(struct bnx2x *bp)
1291{
1292 int intr_sem;
1293
1294 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1295 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1296
1297 if (intr_sem) {
1298 if (netif_running(bp->dev)) {
1299 bnx2x_napi_enable(bp);
1300 bnx2x_int_enable(bp);
1301 if (bp->state == BNX2X_STATE_OPEN)
1302 netif_tx_wake_all_queues(bp->dev);
1303 }
1304 }
1305}
1306
1307void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1308{
1309 bnx2x_int_disable_sync(bp, disable_hw);
1310 bnx2x_napi_disable(bp);
1311 netif_tx_disable(bp->dev);
1312}
1313
1314u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1315{
1316#ifdef BCM_CNIC
1317 struct bnx2x *bp = netdev_priv(dev);
1318 if (NO_FCOE(bp))
1319 return skb_tx_hash(dev, skb);
1320 else {
1321 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1322 u16 ether_type = ntohs(hdr->h_proto);
1323
1324 /* Skip VLAN tag if present */
1325 if (ether_type == ETH_P_8021Q) {
1326 struct vlan_ethhdr *vhdr =
1327 (struct vlan_ethhdr *)skb->data;
1328
1329 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1330 }
1331
1332 /* If ethertype is FCoE or FIP - use FCoE ring */
1333 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1334 return bnx2x_fcoe(bp, index);
1335 }
1336#endif
1337 /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1338 */
1339 return __skb_tx_hash(dev, skb,
1340 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1341}
1342
1343void bnx2x_set_num_queues(struct bnx2x *bp)
1344{
1345 switch (bp->multi_mode) {
1346 case ETH_RSS_MODE_DISABLED:
1347 bp->num_queues = 1;
1348 break;
1349 case ETH_RSS_MODE_REGULAR:
1350 bp->num_queues = bnx2x_calc_num_queues(bp);
1351 break;
1352
1353 default:
1354 bp->num_queues = 1;
1355 break;
1356 }
1357
1358 /* Add special queues */
1359 bp->num_queues += NONE_ETH_CONTEXT_USE;
1360}
1361
1362#ifdef BCM_CNIC
1363static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1364{
1365 if (!NO_FCOE(bp)) {
1366 if (!IS_MF_SD(bp))
1367 bnx2x_set_fip_eth_mac_addr(bp, 1);
1368 bnx2x_set_all_enode_macs(bp, 1);
1369 bp->flags |= FCOE_MACS_SET;
1370 }
9f6c9258 1371}
ec6ba945 1372#endif
9f6c9258 1373
6891dd25
DK
1374static void bnx2x_release_firmware(struct bnx2x *bp)
1375{
1376 kfree(bp->init_ops_offsets);
1377 kfree(bp->init_ops);
1378 kfree(bp->init_data);
1379 release_firmware(bp->firmware);
1380}
1381
1382static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1383{
1384 int rc, num = bp->num_queues;
1385
1386#ifdef BCM_CNIC
1387 if (NO_FCOE(bp))
1388 num -= FCOE_CONTEXT_USE;
1389
1390#endif
1391 netif_set_real_num_tx_queues(bp->dev, num);
1392 rc = netif_set_real_num_rx_queues(bp->dev, num);
1393 return rc;
1394}
1395
1396static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1397{
1398 int i;
1399
1400 for_each_queue(bp, i) {
1401 struct bnx2x_fastpath *fp = &bp->fp[i];
1402
1403 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1404 if (IS_FCOE_IDX(i))
1405 /*
1406 * Although there are no IP frames expected to arrive to
1407 * this ring we still want to add an
1408 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1409 * overrun attack.
1410 */
1411 fp->rx_buf_size =
1412 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1413 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1414 else
1415 fp->rx_buf_size =
1416 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1417 IP_HEADER_ALIGNMENT_PADDING;
1418 }
1419}
1420
1421/* must be called with rtnl_lock */
1422int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1423{
1424 u32 load_code;
1425 int i, rc;
1426
1427 /* Set init arrays */
1428 rc = bnx2x_init_firmware(bp);
1429 if (rc) {
1430 BNX2X_ERR("Error loading firmware\n");
1431 return rc;
1432 }
1433
1434#ifdef BNX2X_STOP_ON_ERROR
1435 if (unlikely(bp->panic))
1436 return -EPERM;
1437#endif
1438
1439 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1440
1441 /* Set the initial link reported state to link down */
1442 bnx2x_acquire_phy_lock(bp);
1443 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1444 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1445 &bp->last_reported_link.link_report_flags);
1446 bnx2x_release_phy_lock(bp);
1447
1448 /* must be called before memory allocation and HW init */
1449 bnx2x_ilt_set_info(bp);
1450
1451 /* zero fastpath structures preserving invariants like napi which are
1452 * allocated only once
1453 */
1454 for_each_queue(bp, i)
1455 bnx2x_bz_fp(bp, i);
1456
1457 /* Set the receive queues buffer size */
1458 bnx2x_set_rx_buf_size(bp);
1459
1460 for_each_queue(bp, i)
1461 bnx2x_fp(bp, i, disable_tpa) =
1462 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1463
1464#ifdef BCM_CNIC
1465 /* We don't want TPA on FCoE L2 ring */
1466 bnx2x_fcoe(bp, disable_tpa) = 1;
1467#endif
1468
1469 if (bnx2x_alloc_mem(bp))
1470 return -ENOMEM;
1471
1472 /* As long as bnx2x_alloc_mem() may possibly update
1473 * bp->num_queues, bnx2x_set_real_num_queues() should always
1474 * come after it.
1475 */
1476 rc = bnx2x_set_real_num_queues(bp);
1477 if (rc) {
1478 BNX2X_ERR("Unable to set real_num_queues\n");
1479 goto load_error0;
1480 }
1481
1482 bnx2x_napi_enable(bp);
1483
1484 /* Send LOAD_REQUEST command to MCP
1485 Returns the type of LOAD command:
1486 if it is the first port to be initialized
1487 common blocks should be initialized, otherwise - not
1488 */
1489 if (!BP_NOMCP(bp)) {
1490 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1491 if (!load_code) {
1492 BNX2X_ERR("MCP response failure, aborting\n");
1493 rc = -EBUSY;
1494 goto load_error1;
1495 }
1496 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1497 rc = -EBUSY; /* other port in diagnostic mode */
1498 goto load_error1;
1499 }
1500
1501 } else {
1502 int path = BP_PATH(bp);
1503 int port = BP_PORT(bp);
1504
1505 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1506 path, load_count[path][0], load_count[path][1],
1507 load_count[path][2]);
1508 load_count[path][0]++;
1509 load_count[path][1 + port]++;
1510 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1511 path, load_count[path][0], load_count[path][1],
1512 load_count[path][2]);
1513 if (load_count[path][0] == 1)
1514 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1515 else if (load_count[path][1 + port] == 1)
1516 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1517 else
1518 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1519 }
1520
1521 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1522 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1523 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1524 bp->port.pmf = 1;
1525 else
1526 bp->port.pmf = 0;
1527 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1528
1529 /* Initialize HW */
1530 rc = bnx2x_init_hw(bp, load_code);
1531 if (rc) {
1532 BNX2X_ERR("HW init failed, aborting\n");
1533 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1534 goto load_error2;
1535 }
1536
1537 /* Connect to IRQs */
1538 rc = bnx2x_setup_irqs(bp);
1539 if (rc) {
1540 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1541 goto load_error2;
1542 }
1543
1544 /* Setup NIC internals and enable interrupts */
1545 bnx2x_nic_init(bp, load_code);
1546
1547 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1548 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1549 (bp->common.shmem2_base))
1550 SHMEM2_WR(bp, dcc_support,
1551 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1552 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1553
1554 /* Send LOAD_DONE command to MCP */
1555 if (!BP_NOMCP(bp)) {
1556 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1557 if (!load_code) {
1558 BNX2X_ERR("MCP response failure, aborting\n");
1559 rc = -EBUSY;
1560 goto load_error3;
1561 }
1562 }
1563
1564 bnx2x_dcbx_init(bp);
1565
1566 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1567
1568 rc = bnx2x_func_start(bp);
1569 if (rc) {
1570 BNX2X_ERR("Function start failed!\n");
1571#ifndef BNX2X_STOP_ON_ERROR
1572 goto load_error3;
1573#else
1574 bp->panic = 1;
1575 return -EBUSY;
1576#endif
1577 }
1578
1579 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1580 if (rc) {
1581 BNX2X_ERR("Setup leading failed!\n");
1582#ifndef BNX2X_STOP_ON_ERROR
1583 goto load_error3;
1584#else
1585 bp->panic = 1;
1586 return -EBUSY;
1587#endif
1588 }
1589
1590 if (!CHIP_IS_E1(bp) &&
1591 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1592 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1593 bp->flags |= MF_FUNC_DIS;
1594 }
1595
1596#ifdef BCM_CNIC
1597 /* Enable Timer scan */
1598 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1599#endif
1600
1601 for_each_nondefault_queue(bp, i) {
1602 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1603 if (rc)
1604#ifdef BCM_CNIC
1605 goto load_error4;
1606#else
1607 goto load_error3;
1608#endif
1609 }
1610
1611 /* Now when Clients are configured we are ready to work */
1612 bp->state = BNX2X_STATE_OPEN;
1613
1614#ifdef BCM_CNIC
1615 bnx2x_set_fcoe_eth_macs(bp);
1616#endif
1617
1618 bnx2x_set_eth_mac(bp, 1);
1619
1620 /* Clear MC configuration */
1621 if (CHIP_IS_E1(bp))
1622 bnx2x_invalidate_e1_mc_list(bp);
1623 else
1624 bnx2x_invalidate_e1h_mc_list(bp);
1625
1626 /* Clear UC lists configuration */
1627 bnx2x_invalidate_uc_list(bp);
1628
1629 if (bp->pending_max) {
1630 bnx2x_update_max_mf_config(bp, bp->pending_max);
1631 bp->pending_max = 0;
1632 }
1633
1634 if (bp->port.pmf)
1635 bnx2x_initial_phy_init(bp, load_mode);
1636
1637 /* Initialize Rx filtering */
1638 bnx2x_set_rx_mode(bp->dev);
1639
1640 /* Start fast path */
1641 switch (load_mode) {
1642 case LOAD_NORMAL:
1643 /* Tx queue should be only reenabled */
1644 netif_tx_wake_all_queues(bp->dev);
1645 /* Initialize the receive filter. */
1646 break;
1647
1648 case LOAD_OPEN:
1649 netif_tx_start_all_queues(bp->dev);
1650 smp_mb__after_clear_bit();
1651 break;
1652
1653 case LOAD_DIAG:
1654 bp->state = BNX2X_STATE_DIAG;
1655 break;
1656
1657 default:
1658 break;
1659 }
1660
1661 if (!bp->port.pmf)
1662 bnx2x__link_status_update(bp);
1663
1664 /* start the timer */
1665 mod_timer(&bp->timer, jiffies + bp->current_interval);
1666
1667#ifdef BCM_CNIC
1668 bnx2x_setup_cnic_irq_info(bp);
1669 if (bp->state == BNX2X_STATE_OPEN)
1670 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1671#endif
1672 bnx2x_inc_load_cnt(bp);
1673
1674 bnx2x_release_firmware(bp);
1675
1676 return 0;
1677
1678#ifdef BCM_CNIC
1679load_error4:
1680 /* Disable Timer scan */
1681 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1682#endif
1683load_error3:
1684 bnx2x_int_disable_sync(bp, 1);
1685
1686 /* Free SKBs, SGEs, TPA pool and driver internals */
1687 bnx2x_free_skbs(bp);
1688 for_each_rx_queue(bp, i)
1689 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1690
1691 /* Release IRQs */
1692 bnx2x_free_irq(bp);
1693load_error2:
1694 if (!BP_NOMCP(bp)) {
1695 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1696 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1697 }
1698
1699 bp->port.pmf = 0;
1700load_error1:
1701 bnx2x_napi_disable(bp);
1702load_error0:
1703 bnx2x_free_mem(bp);
1704
1705 bnx2x_release_firmware(bp);
1706
1707 return rc;
1708}
1709
1710/* must be called with rtnl_lock */
1711int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1712{
1713 int i;
1714
1715 if (bp->state == BNX2X_STATE_CLOSED) {
1716 /* Interface has been removed - nothing to recover */
1717 bp->recovery_state = BNX2X_RECOVERY_DONE;
1718 bp->is_leader = 0;
1719 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1720 smp_wmb();
1721
1722 return -EINVAL;
1723 }
1724
1725#ifdef BCM_CNIC
1726 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1727#endif
1728 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1729
1730 /* Set "drop all" */
1731 bp->rx_mode = BNX2X_RX_MODE_NONE;
1732 bnx2x_set_storm_rx_mode(bp);
1733
1734 /* Stop Tx */
1735 bnx2x_tx_disable(bp);
1736
1737 del_timer_sync(&bp->timer);
1738
1739 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1740 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1741
1742 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1743
1744 /* Cleanup the chip if needed */
1745 if (unload_mode != UNLOAD_RECOVERY)
1746 bnx2x_chip_cleanup(bp, unload_mode);
1747 else {
1748 /* Disable HW interrupts, NAPI and Tx */
1749 bnx2x_netif_stop(bp, 1);
1750
1751 /* Release IRQs */
1752 bnx2x_free_irq(bp);
1753 }
1754
1755 bp->port.pmf = 0;
1756
1757 /* Free SKBs, SGEs, TPA pool and driver internals */
1758 bnx2x_free_skbs(bp);
1759 for_each_rx_queue(bp, i)
1760 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1761
1762 bnx2x_free_mem(bp);
1763
1764 bp->state = BNX2X_STATE_CLOSED;
1765
1766 /* The last driver must disable a "close the gate" if there is no
1767 * parity attention or "process kill" pending.
1768 */
1769 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1770 bnx2x_reset_is_done(bp))
1771 bnx2x_disable_close_the_gate(bp);
1772
1773 /* Reset MCP mail box sequence if there is on going recovery */
1774 if (unload_mode == UNLOAD_RECOVERY)
1775 bp->fw_seq = 0;
1776
1777 return 0;
1778}
1779
1780int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1781{
1782 u16 pmcsr;
1783
1784 /* If there is no power capability, silently succeed */
1785 if (!bp->pm_cap) {
1786 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1787 return 0;
1788 }
1789
1790 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1791
1792 switch (state) {
1793 case PCI_D0:
1794 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1795 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1796 PCI_PM_CTRL_PME_STATUS));
1797
1798 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1799 /* delay required during transition out of D3hot */
1800 msleep(20);
1801 break;
1802
1803 case PCI_D3hot:
1804 /* If there are other clients above don't
1805 shut down the power */
1806 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1807 return 0;
1808 /* Don't shut down the power for emulation and FPGA */
1809 if (CHIP_REV_IS_SLOW(bp))
1810 return 0;
1811
1812 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1813 pmcsr |= 3;
1814
1815 if (bp->wol)
1816 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1817
1818 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1819 pmcsr);
1820
1821 /* No more memory access after this point until
1822 * device is brought back to D0.
1823 */
1824 break;
1825
1826 default:
1827 return -EINVAL;
1828 }
1829 return 0;
1830}
1831
1832/*
1833 * net_device service functions
1834 */
1835int bnx2x_poll(struct napi_struct *napi, int budget)
1836{
1837 int work_done = 0;
1838 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1839 napi);
1840 struct bnx2x *bp = fp->bp;
1841
1842 while (1) {
1843#ifdef BNX2X_STOP_ON_ERROR
1844 if (unlikely(bp->panic)) {
1845 napi_complete(napi);
1846 return 0;
1847 }
1848#endif
1849
1850 if (bnx2x_has_tx_work(fp))
1851 bnx2x_tx_int(fp);
1852
1853 if (bnx2x_has_rx_work(fp)) {
1854 work_done += bnx2x_rx_int(fp, budget - work_done);
1855
1856 /* must not complete if we consumed full budget */
1857 if (work_done >= budget)
1858 break;
1859 }
1860
1861 /* Fall out from the NAPI loop if needed */
1862 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1863#ifdef BCM_CNIC
1864 /* No need to update SB for FCoE L2 ring as long as
1865 * it's connected to the default SB and the SB
1866 * has been updated when NAPI was scheduled.
1867 */
1868 if (IS_FCOE_FP(fp)) {
1869 napi_complete(napi);
1870 break;
1871 }
1872#endif
1873
9f6c9258 1874 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
1875 /* bnx2x_has_rx_work() reads the status block,
1876 * thus we need to ensure that status block indices
1877 * have been actually read (bnx2x_update_fpsb_idx)
1878 * prior to this check (bnx2x_has_rx_work) so that
1879 * we won't write the "newer" value of the status block
1880 * to IGU (if there was a DMA right after
1881 * bnx2x_has_rx_work and if there is no rmb, the memory
1882 * reading (bnx2x_update_fpsb_idx) may be postponed
1883 * to right before bnx2x_ack_sb). In this case there
1884 * will never be another interrupt until there is
1885 * another update of the status block, while there
1886 * is still unhandled work.
1887 */
1888 rmb();
1889
1890 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1891 napi_complete(napi);
1892 /* Re-enable interrupts */
1893 DP(NETIF_MSG_HW,
1894 "Update index to %d\n", fp->fp_hc_idx);
1895 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1896 le16_to_cpu(fp->fp_hc_idx),
1897 IGU_INT_ENABLE, 1);
1898 break;
1899 }
1900 }
1901 }
1902
1903 return work_done;
1904}
1905
1906/* we split the first BD into headers and data BDs
1907 * to ease the pain of our fellow microcode engineers
1908 * we use one mapping for both BDs
1909 * So far this has only been observed to happen
1910 * in Other Operating Systems(TM)
1911 */
1912static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1913 struct bnx2x_fastpath *fp,
1914 struct sw_tx_bd *tx_buf,
1915 struct eth_tx_start_bd **tx_bd, u16 hlen,
1916 u16 bd_prod, int nbd)
1917{
1918 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1919 struct eth_tx_bd *d_tx_bd;
1920 dma_addr_t mapping;
1921 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1922
1923 /* first fix first BD */
1924 h_tx_bd->nbd = cpu_to_le16(nbd);
1925 h_tx_bd->nbytes = cpu_to_le16(hlen);
1926
1927 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1928 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1929 h_tx_bd->addr_lo, h_tx_bd->nbd);
1930
1931 /* now get a new data BD
1932 * (after the pbd) and fill it */
1933 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1934 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1935
1936 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1937 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1938
1939 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1940 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1941 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1942
1943 /* this marks the BD as one that has no individual mapping */
1944 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1945
1946 DP(NETIF_MSG_TX_QUEUED,
1947 "TSO split data size is %d (%x:%x)\n",
1948 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1949
1950 /* update tx_bd */
1951 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1952
1953 return bd_prod;
1954}
1955
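/* Adjust a ones-complement checksum by 'fix' bytes around t_header: for a
 * positive fix, the checksum of the fix bytes preceding t_header is
 * subtracted out; for a negative fix, the checksum of the -fix bytes
 * starting at t_header is added in (the adjusted sum is then folded and
 * inverted).  The result is returned byte-swapped, as the parse BD expects.
 */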
1956static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1957{
1958 if (fix > 0)
1959 csum = (u16) ~csum_fold(csum_sub(csum,
1960 csum_partial(t_header - fix, fix, 0)));
1961
1962 else if (fix < 0)
1963 csum = (u16) ~csum_fold(csum_add(csum,
1964 csum_partial(t_header, -fix, 0)));
1965
1966 return swab16(csum);
1967}
1968
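/* Classify the skb for transmit: map its checksum state (CHECKSUM_PARTIAL
 * over IPv4 or IPv6, TCP or not) and its GSO state into the driver's
 * XMIT_* flags, which the xmit path below uses to program the start BD
 * and the parse BD.
 */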
1969static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1970{
1971 u32 rc;
1972
1973 if (skb->ip_summed != CHECKSUM_PARTIAL)
1974 rc = XMIT_PLAIN;
1975
1976 else {
d0d9d8ef 1977 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
1978 rc = XMIT_CSUM_V6;
1979 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1980 rc |= XMIT_CSUM_TCP;
1981
1982 } else {
1983 rc = XMIT_CSUM_V4;
1984 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1985 rc |= XMIT_CSUM_TCP;
1986 }
1987 }
1988
5892b9e9
VZ
1989 if (skb_is_gso_v6(skb))
1990 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1991 else if (skb_is_gso(skb))
1992 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
1993
1994 return rc;
1995}
1996
1997#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 1998/* check if the packet requires linearization (packet is too fragmented)
 1999   no need to check fragmentation if page size > 8K, since the FW
 2000   restrictions cannot be violated in that case */
2001static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2002 u32 xmit_type)
2003{
2004 int to_copy = 0;
2005 int hlen = 0;
2006 int first_bd_sz = 0;
2007
2008 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2009 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2010
2011 if (xmit_type & XMIT_GSO) {
2012 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2013 /* Check if LSO packet needs to be copied:
2014 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2015 int wnd_size = MAX_FETCH_BD - 3;
2016 /* Number of windows to check */
2017 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2018 int wnd_idx = 0;
2019 int frag_idx = 0;
2020 u32 wnd_sum = 0;
2021
2022 /* Headers length */
2023 hlen = (int)(skb_transport_header(skb) - skb->data) +
2024 tcp_hdrlen(skb);
2025
2026 /* Amount of data (w/o headers) on linear part of SKB*/
2027 first_bd_sz = skb_headlen(skb) - hlen;
2028
2029 wnd_sum = first_bd_sz;
2030
2031 /* Calculate the first sum - it's special */
2032 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2033 wnd_sum +=
2034 skb_shinfo(skb)->frags[frag_idx].size;
2035
2036 /* If there was data on linear skb data - check it */
2037 if (first_bd_sz > 0) {
2038 if (unlikely(wnd_sum < lso_mss)) {
2039 to_copy = 1;
2040 goto exit_lbl;
2041 }
2042
2043 wnd_sum -= first_bd_sz;
2044 }
2045
2046 /* Others are easier: run through the frag list and
2047 check all windows */
2048 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2049 wnd_sum +=
2050 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2051
2052 if (unlikely(wnd_sum < lso_mss)) {
2053 to_copy = 1;
2054 break;
2055 }
2056 wnd_sum -=
2057 skb_shinfo(skb)->frags[wnd_idx].size;
2058 }
2059 } else {
 2060			/* a non-LSO packet that is too fragmented must always
 2061			   be linearized */
2062 to_copy = 1;
2063 }
2064 }
2065
2066exit_lbl:
2067 if (unlikely(to_copy))
2068 DP(NETIF_MSG_TX_QUEUED,
2069 "Linearization IS REQUIRED for %s packet. "
2070 "num_frags %d hlen %d first_bd_sz %d\n",
2071 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2072 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2073
2074 return to_copy;
2075}
2076#endif
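/* How the check above works: the FW can fetch at most MAX_FETCH_BD BDs per
 * request, three of which are consumed by the headers BD, the PBD and the
 * last BD, leaving a window of wnd_size = MAX_FETCH_BD - 3 payload BDs.
 * For an LSO packet, each such window of consecutive payload chunks (the
 * linear payload followed by the frags) must carry at least one MSS
 * (lso_mss); the loop slides the window one frag at a time and requests
 * linearization as soon as a window falls short.  A non-LSO packet with
 * MAX_FETCH_BD - 3 or more frags is always linearized.
 */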
2077
2297a2da
VZ
2078static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2079 u32 xmit_type)
f2e0899f 2080{
2297a2da
VZ
2081 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2082 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2083 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2084 if ((xmit_type & XMIT_GSO_V6) &&
2085 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2086 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2087}
2088
2089/**
e8920674 2090 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2091 *
e8920674
DK
2092 * @skb: packet skb
2093 * @pbd: parse BD
2094 * @xmit_type: xmit flags
f2e0899f
DK
2095 */
2096static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2097 struct eth_tx_parse_bd_e1x *pbd,
2098 u32 xmit_type)
2099{
2100 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2101 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2102 pbd->tcp_flags = pbd_tcp_flags(skb);
2103
2104 if (xmit_type & XMIT_GSO_V4) {
2105 pbd->ip_id = swab16(ip_hdr(skb)->id);
2106 pbd->tcp_pseudo_csum =
2107 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2108 ip_hdr(skb)->daddr,
2109 0, IPPROTO_TCP, 0));
2110
2111 } else
2112 pbd->tcp_pseudo_csum =
2113 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2114 &ipv6_hdr(skb)->daddr,
2115 0, IPPROTO_TCP, 0));
2116
2117 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2118}
f85582f8 2119
f2e0899f 2120/**
e8920674 2121 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2122 *
e8920674
DK
2123 * @bp: driver handle
2124 * @skb: packet skb
2125 * @parsing_data: data to be updated
2126 * @xmit_type: xmit flags
f2e0899f 2127 *
e8920674 2128 * 57712 related
f2e0899f
DK
2129 */
2130static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2131 u32 *parsing_data, u32 xmit_type)
f2e0899f 2132{
e39aece7
VZ
2133 *parsing_data |=
2134 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2135 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2136 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2137
e39aece7
VZ
2138 if (xmit_type & XMIT_CSUM_TCP) {
2139 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2140 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2141 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2142
e39aece7
VZ
2143 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2144 } else
2145 /* We support checksum offload for TCP and UDP only.
2146 * No need to pass the UDP header length - it's a constant.
2147 */
2148 return skb_transport_header(skb) +
2149 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2150}
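/* Note on units in the E2 parsing data set up above: the TCP header start
 * offset is stored in 16-bit words (hence the >> 1) and the TCP header
 * length in 32-bit dwords (hence the / 4), while the value returned to
 * the caller is the total header length in bytes.
 */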
2151
2152/**
e8920674 2153 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2154 *
e8920674
DK
2155 * @bp: driver handle
2156 * @skb: packet skb
2157 * @pbd: parse BD to be updated
2158 * @xmit_type: xmit flags
f2e0899f
DK
2159 */
2160static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2161 struct eth_tx_parse_bd_e1x *pbd,
2162 u32 xmit_type)
2163{
e39aece7 2164 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2165
2166 /* for now NS flag is not used in Linux */
2167 pbd->global_data =
2168 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2169 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2170
2171 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2172 skb_network_header(skb)) >> 1;
f2e0899f 2173
e39aece7
VZ
2174 hlen += pbd->ip_hlen_w;
2175
2176 /* We support checksum offload for TCP and UDP only */
2177 if (xmit_type & XMIT_CSUM_TCP)
2178 hlen += tcp_hdrlen(skb) / 2;
2179 else
2180 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2181
2182 pbd->total_hlen_w = cpu_to_le16(hlen);
2183 hlen = hlen*2;
2184
2185 if (xmit_type & XMIT_CSUM_TCP) {
2186 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2187
2188 } else {
2189 s8 fix = SKB_CS_OFF(skb); /* signed! */
2190
2191 DP(NETIF_MSG_TX_QUEUED,
2192 "hlen %d fix %d csum before fix %x\n",
2193 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2194
2195 /* HW bug: fixup the CSUM */
2196 pbd->tcp_pseudo_csum =
2197 bnx2x_csum_fix(skb_transport_header(skb),
2198 SKB_CS(skb), fix);
2199
2200 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2201 pbd->tcp_pseudo_csum);
2202 }
2203
2204 return hlen;
2205}
f85582f8 2206
9f6c9258
DK
2207/* called with netif_tx_lock
2208 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2209 * netif_wake_queue()
2210 */
2211netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2212{
2213 struct bnx2x *bp = netdev_priv(dev);
2214 struct bnx2x_fastpath *fp;
2215 struct netdev_queue *txq;
2216 struct sw_tx_bd *tx_buf;
2217 struct eth_tx_start_bd *tx_start_bd;
2218 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2219 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2220 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2221 u32 pbd_e2_parsing_data = 0;
9f6c9258
DK
2222 u16 pkt_prod, bd_prod;
2223 int nbd, fp_index;
2224 dma_addr_t mapping;
2225 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2226 int i;
2227 u8 hlen = 0;
2228 __le16 pkt_size = 0;
2229 struct ethhdr *eth;
2230 u8 mac_type = UNICAST_ADDRESS;
2231
2232#ifdef BNX2X_STOP_ON_ERROR
2233 if (unlikely(bp->panic))
2234 return NETDEV_TX_BUSY;
2235#endif
2236
2237 fp_index = skb_get_queue_mapping(skb);
2238 txq = netdev_get_tx_queue(dev, fp_index);
2239
2240 fp = &bp->fp[fp_index];
2241
2242 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2243 fp->eth_q_stats.driver_xoff++;
2244 netif_tx_stop_queue(txq);
2245 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2246 return NETDEV_TX_BUSY;
2247 }
2248
f2e0899f
DK
2249 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2250 "protocol(%x,%x) gso type %x xmit_type %x\n",
2251 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2252 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2253
2254 eth = (struct ethhdr *)skb->data;
2255
2256 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2257 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2258 if (is_broadcast_ether_addr(eth->h_dest))
2259 mac_type = BROADCAST_ADDRESS;
2260 else
2261 mac_type = MULTICAST_ADDRESS;
2262 }
2263
2264#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 2265	/* First, check if we need to linearize the skb (due to FW
 2266	   restrictions). No need to check fragmentation if page size > 8K,
 2267	   since the FW restrictions cannot be violated in that case */
2268 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2269 /* Statistics of linearization */
2270 bp->lin_cnt++;
2271 if (skb_linearize(skb) != 0) {
2272 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2273 "silently dropping this SKB\n");
2274 dev_kfree_skb_any(skb);
2275 return NETDEV_TX_OK;
2276 }
2277 }
2278#endif
2279
2280 /*
2281 Please read carefully. First we use one BD which we mark as start,
2282 then we have a parsing info BD (used for TSO or xsum),
2283 and only then we have the rest of the TSO BDs.
2284 (don't forget to mark the last one as last,
2285 and to unmap only AFTER you write to the BD ...)
 2286	And above all, all PBD sizes are in words - NOT DWORDS!
2287 */
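	/* BD chain produced below, in order:
	 *   start BD -> parse BD (e1x or e2) -> [extra data BD if the
	 *   headers were split off for TSO] -> one data BD per frag.
	 * The doorbell at the end advertises the total BD count (nbd).
	 */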
2288
2289 pkt_prod = fp->tx_pkt_prod++;
2290 bd_prod = TX_BD(fp->tx_bd_prod);
2291
2292 /* get a tx_buf and first BD */
2293 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2294 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2295
2296 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2297 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2298 mac_type);
2299
9f6c9258 2300 /* header nbd */
f85582f8 2301 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2302
2303 /* remember the first BD of the packet */
2304 tx_buf->first_bd = fp->tx_bd_prod;
2305 tx_buf->skb = skb;
2306 tx_buf->flags = 0;
2307
2308 DP(NETIF_MSG_TX_QUEUED,
2309 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2310 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2311
eab6d18d 2312 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2313 tx_start_bd->vlan_or_ethertype =
2314 cpu_to_le16(vlan_tx_tag_get(skb));
2315 tx_start_bd->bd_flags.as_bitfield |=
2316 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2317 } else
523224a3 2318 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2319
2320 /* turn on parsing and get a BD */
2321 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2322
523224a3
DK
2323 if (xmit_type & XMIT_CSUM) {
2324 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2325
2326 if (xmit_type & XMIT_CSUM_V4)
2327 tx_start_bd->bd_flags.as_bitfield |=
2328 ETH_TX_BD_FLAGS_IP_CSUM;
2329 else
2330 tx_start_bd->bd_flags.as_bitfield |=
2331 ETH_TX_BD_FLAGS_IPV6;
9f6c9258 2332
523224a3
DK
2333 if (!(xmit_type & XMIT_CSUM_TCP))
2334 tx_start_bd->bd_flags.as_bitfield |=
2335 ETH_TX_BD_FLAGS_IS_UDP;
2336 }
9f6c9258 2337
f2e0899f
DK
2338 if (CHIP_IS_E2(bp)) {
2339 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2340 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2341 /* Set PBD in checksum offload case */
2342 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2343 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2344 &pbd_e2_parsing_data,
2345 xmit_type);
f2e0899f
DK
2346 } else {
2347 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2348 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2349 /* Set PBD in checksum offload case */
2350 if (xmit_type & XMIT_CSUM)
2351 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2352
9f6c9258
DK
2353 }
2354
f85582f8 2355 /* Map skb linear data for DMA */
9f6c9258
DK
2356 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2357 skb_headlen(skb), DMA_TO_DEVICE);
2358
f85582f8 2359 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2360 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2361 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2362 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2363 tx_start_bd->nbd = cpu_to_le16(nbd);
2364 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2365 pkt_size = tx_start_bd->nbytes;
2366
2367 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2368 " nbytes %d flags %x vlan %x\n",
2369 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2370 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2371 tx_start_bd->bd_flags.as_bitfield,
2372 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2373
2374 if (xmit_type & XMIT_GSO) {
2375
2376 DP(NETIF_MSG_TX_QUEUED,
2377 "TSO packet len %d hlen %d total len %d tso size %d\n",
2378 skb->len, hlen, skb_headlen(skb),
2379 skb_shinfo(skb)->gso_size);
2380
2381 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2382
2383 if (unlikely(skb_headlen(skb) > hlen))
2384 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2385 hlen, bd_prod, ++nbd);
f2e0899f 2386 if (CHIP_IS_E2(bp))
2297a2da
VZ
2387 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2388 xmit_type);
f2e0899f
DK
2389 else
2390 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2391 }
2297a2da
VZ
2392
2393 /* Set the PBD's parsing_data field if not zero
2394 * (for the chips newer than 57711).
2395 */
2396 if (pbd_e2_parsing_data)
2397 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2398
9f6c9258
DK
2399 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2400
f85582f8 2401 /* Handle fragmented skb */
9f6c9258
DK
2402 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2403 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2404
2405 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2406 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2407 if (total_pkt_bd == NULL)
2408 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2409
2410 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2411 frag->page_offset,
2412 frag->size, DMA_TO_DEVICE);
2413
2414 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2415 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2416 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2417 le16_add_cpu(&pkt_size, frag->size);
2418
2419 DP(NETIF_MSG_TX_QUEUED,
2420 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2421 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2422 le16_to_cpu(tx_data_bd->nbytes));
2423 }
2424
2425 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2426
2427 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2428
2429 /* now send a tx doorbell, counting the next BD
2430 * if the packet contains or ends with it
2431 */
2432 if (TX_BD_POFF(bd_prod) < nbd)
2433 nbd++;
2434
2435 if (total_pkt_bd != NULL)
2436 total_pkt_bd->total_pkt_bytes = pkt_size;
2437
523224a3 2438 if (pbd_e1x)
9f6c9258 2439 DP(NETIF_MSG_TX_QUEUED,
523224a3 2440 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2441 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2442 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2443 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2444 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2445 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2446 if (pbd_e2)
2447 DP(NETIF_MSG_TX_QUEUED,
2448 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2449 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2450 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2451 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2452 pbd_e2->parsing_data);
9f6c9258
DK
2453 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2454
2455 /*
2456 * Make sure that the BD data is updated before updating the producer
2457 * since FW might read the BD right after the producer is updated.
2458 * This is only applicable for weak-ordered memory model archs such
 2459	 * as IA-64. The following barrier is also mandatory since the FW
 2460	 * assumes packets must have BDs.
2461 */
2462 wmb();
2463
2464 fp->tx_db.data.prod += nbd;
2465 barrier();
f85582f8 2466
523224a3 2467 DOORBELL(bp, fp->cid, fp->tx_db.raw);
9f6c9258
DK
2468
2469 mmiowb();
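	/* barrier() above keeps the compiler from reordering the doorbell
	 * data update with the MMIO write, while mmiowb() orders the posted
	 * doorbell write ahead of a subsequent unlock on architectures that
	 * need it.
	 */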
2470
2471 fp->tx_bd_prod += nbd;
2472
2473 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2474 netif_tx_stop_queue(txq);
2475
 2476		/* the paired memory barrier is in bnx2x_tx_int(); we must keep
 2477		 * the ordering of the set_bit() in netif_tx_stop_queue() and
 2478		 * the read of fp->tx_bd_cons */
2479 smp_mb();
2480
2481 fp->eth_q_stats.driver_xoff++;
2482 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2483 netif_tx_wake_queue(txq);
2484 }
2485 fp->tx_pkt++;
2486
2487 return NETDEV_TX_OK;
2488}
f85582f8 2489
9f6c9258
DK
2490/* called with rtnl_lock */
2491int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2492{
2493 struct sockaddr *addr = p;
2494 struct bnx2x *bp = netdev_priv(dev);
2495
2496 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2497 return -EINVAL;
2498
2499 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
523224a3
DK
2500 if (netif_running(dev))
2501 bnx2x_set_eth_mac(bp, 1);
9f6c9258
DK
2502
2503 return 0;
2504}
2505
b3b83c3f
DK
2506static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2507{
2508 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2509 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2510
2511 /* Common */
2512#ifdef BCM_CNIC
2513 if (IS_FCOE_IDX(fp_index)) {
2514 memset(sb, 0, sizeof(union host_hc_status_block));
2515 fp->status_blk_mapping = 0;
2516
2517 } else {
2518#endif
2519 /* status blocks */
2520 if (CHIP_IS_E2(bp))
2521 BNX2X_PCI_FREE(sb->e2_sb,
2522 bnx2x_fp(bp, fp_index,
2523 status_blk_mapping),
2524 sizeof(struct host_hc_status_block_e2));
2525 else
2526 BNX2X_PCI_FREE(sb->e1x_sb,
2527 bnx2x_fp(bp, fp_index,
2528 status_blk_mapping),
2529 sizeof(struct host_hc_status_block_e1x));
2530#ifdef BCM_CNIC
2531 }
2532#endif
2533 /* Rx */
2534 if (!skip_rx_queue(bp, fp_index)) {
2535 bnx2x_free_rx_bds(fp);
2536
2537 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2538 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2539 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2540 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2541 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2542
2543 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2544 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2545 sizeof(struct eth_fast_path_rx_cqe) *
2546 NUM_RCQ_BD);
2547
2548 /* SGE ring */
2549 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2550 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2551 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2552 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2553 }
2554
2555 /* Tx */
2556 if (!skip_tx_queue(bp, fp_index)) {
2557 /* fastpath tx rings: tx_buf tx_desc */
2558 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2559 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2560 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2561 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2562 }
2563 /* end of fastpath */
2564}
2565
2566void bnx2x_free_fp_mem(struct bnx2x *bp)
2567{
2568 int i;
2569 for_each_queue(bp, i)
2570 bnx2x_free_fp_mem_at(bp, i);
2571}
2572
2573static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2574{
2575 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2576 if (CHIP_IS_E2(bp)) {
2577 bnx2x_fp(bp, index, sb_index_values) =
2578 (__le16 *)status_blk.e2_sb->sb.index_values;
2579 bnx2x_fp(bp, index, sb_running_index) =
2580 (__le16 *)status_blk.e2_sb->sb.running_index;
2581 } else {
2582 bnx2x_fp(bp, index, sb_index_values) =
2583 (__le16 *)status_blk.e1x_sb->sb.index_values;
2584 bnx2x_fp(bp, index, sb_running_index) =
2585 (__le16 *)status_blk.e1x_sb->sb.running_index;
2586 }
2587}
2588
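/* The BNX2X_ALLOC/BNX2X_PCI_ALLOC and BNX2X_FREE/BNX2X_PCI_FREE helpers
 * used in these alloc/free routines are driver macros (defined in the
 * bnx2x headers) wrapping kzalloc/dma_alloc_coherent and their free
 * counterparts; the allocation variants bail out to the local
 * alloc_mem_err label on failure, which is why no explicit error checks
 * appear next to the individual allocations.
 */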
2589static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2590{
2591 union host_hc_status_block *sb;
2592 struct bnx2x_fastpath *fp = &bp->fp[index];
2593 int ring_size = 0;
2594
2595 /* if rx_ring_size specified - use it */
2596 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2597 MAX_RX_AVAIL/bp->num_queues;
2598
2599 /* allocate at least number of buffers required by FW */
2600 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2601 MIN_RX_SIZE_TPA,
2602 rx_ring_size);
2603
2604 bnx2x_fp(bp, index, bp) = bp;
2605 bnx2x_fp(bp, index, index) = index;
2606
2607 /* Common */
2608 sb = &bnx2x_fp(bp, index, status_blk);
2609#ifdef BCM_CNIC
2610 if (!IS_FCOE_IDX(index)) {
2611#endif
2612 /* status blocks */
2613 if (CHIP_IS_E2(bp))
2614 BNX2X_PCI_ALLOC(sb->e2_sb,
2615 &bnx2x_fp(bp, index, status_blk_mapping),
2616 sizeof(struct host_hc_status_block_e2));
2617 else
2618 BNX2X_PCI_ALLOC(sb->e1x_sb,
2619 &bnx2x_fp(bp, index, status_blk_mapping),
2620 sizeof(struct host_hc_status_block_e1x));
2621#ifdef BCM_CNIC
2622 }
2623#endif
2624 set_sb_shortcuts(bp, index);
2625
2626 /* Tx */
2627 if (!skip_tx_queue(bp, index)) {
2628 /* fastpath tx rings: tx_buf tx_desc */
2629 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2630 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2631 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2632 &bnx2x_fp(bp, index, tx_desc_mapping),
2633 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2634 }
2635
2636 /* Rx */
2637 if (!skip_rx_queue(bp, index)) {
2638 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2639 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2640 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2641 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2642 &bnx2x_fp(bp, index, rx_desc_mapping),
2643 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2644
2645 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2646 &bnx2x_fp(bp, index, rx_comp_mapping),
2647 sizeof(struct eth_fast_path_rx_cqe) *
2648 NUM_RCQ_BD);
2649
2650 /* SGE ring */
2651 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2652 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2653 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2654 &bnx2x_fp(bp, index, rx_sge_mapping),
2655 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2656 /* RX BD ring */
2657 bnx2x_set_next_page_rx_bd(fp);
2658
2659 /* CQ ring */
2660 bnx2x_set_next_page_rx_cq(fp);
2661
2662 /* BDs */
2663 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2664 if (ring_size < rx_ring_size)
2665 goto alloc_mem_err;
2666 }
2667
2668 return 0;
2669
2670/* handles low memory cases */
2671alloc_mem_err:
2672 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2673 index, ring_size);
 2674	/* FW will drop all packets if the queue is not big enough,
 2675	 * so in that case we disable the queue entirely.
 2676	 * The minimum size differs for TPA and non-TPA queues.
 2677	 */
2678 if (ring_size < (fp->disable_tpa ?
eb722d7a 2679 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
2680 /* release memory allocated for this queue */
2681 bnx2x_free_fp_mem_at(bp, index);
2682 return -ENOMEM;
2683 }
2684 return 0;
2685}
2686
2687int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2688{
2689 int i;
2690
2691 /**
2692 * 1. Allocate FP for leading - fatal if error
2693 * 2. {CNIC} Allocate FCoE FP - fatal if error
2694 * 3. Allocate RSS - fix number of queues if error
2695 */
2696
2697 /* leading */
2698 if (bnx2x_alloc_fp_mem_at(bp, 0))
2699 return -ENOMEM;
2700#ifdef BCM_CNIC
2701 /* FCoE */
2702 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2703 return -ENOMEM;
2704#endif
2705 /* RSS */
2706 for_each_nondefault_eth_queue(bp, i)
2707 if (bnx2x_alloc_fp_mem_at(bp, i))
2708 break;
2709
2710 /* handle memory failures */
2711 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2712 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2713
2714 WARN_ON(delta < 0);
2715#ifdef BCM_CNIC
2716 /**
2717 * move non eth FPs next to last eth FP
2718 * must be done in that order
2719 * FCOE_IDX < FWD_IDX < OOO_IDX
2720 */
2721
2722 /* move FCoE fp */
2723 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2724#endif
2725 bp->num_queues -= delta;
2726 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2727 bp->num_queues + delta, bp->num_queues);
2728 }
2729
2730 return 0;
2731}
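/* Example of the adjustment above: if RSS allocation fails at queue i,
 * delta = BNX2X_NUM_ETH_QUEUES(bp) - i queues are given up.  With CNIC
 * compiled in, the FCoE fastpath (which lives after the ethernet
 * fastpaths in bp->fp[]) is shifted down by delta via bnx2x_move_fp() so
 * the array stays contiguous, and bp->num_queues is reduced accordingly.
 */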
d6214d7a 2732
8d96286a 2733static int bnx2x_setup_irqs(struct bnx2x *bp)
d6214d7a
DK
2734{
2735 int rc = 0;
2736 if (bp->flags & USING_MSIX_FLAG) {
2737 rc = bnx2x_req_msix_irqs(bp);
2738 if (rc)
2739 return rc;
2740 } else {
2741 bnx2x_ack_int(bp);
2742 rc = bnx2x_req_irq(bp);
2743 if (rc) {
2744 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2745 return rc;
2746 }
2747 if (bp->flags & USING_MSI_FLAG) {
2748 bp->dev->irq = bp->pdev->irq;
2749 netdev_info(bp->dev, "using MSI IRQ %d\n",
2750 bp->pdev->irq);
2751 }
2752 }
2753
2754 return 0;
2755}
2756
523224a3
DK
2757void bnx2x_free_mem_bp(struct bnx2x *bp)
2758{
2759 kfree(bp->fp);
2760 kfree(bp->msix_table);
2761 kfree(bp->ilt);
2762}
2763
2764int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2765{
2766 struct bnx2x_fastpath *fp;
2767 struct msix_entry *tbl;
2768 struct bnx2x_ilt *ilt;
2769
2770 /* fp array */
2771 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2772 if (!fp)
2773 goto alloc_err;
2774 bp->fp = fp;
2775
2776 /* msix table */
ec6ba945 2777 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
523224a3
DK
2778 GFP_KERNEL);
2779 if (!tbl)
2780 goto alloc_err;
2781 bp->msix_table = tbl;
2782
2783 /* ilt */
2784 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2785 if (!ilt)
2786 goto alloc_err;
2787 bp->ilt = ilt;
2788
2789 return 0;
2790alloc_err:
2791 bnx2x_free_mem_bp(bp);
2792 return -ENOMEM;
2793
2794}
2795
66371c44
MM
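/* If the interface is up, perform a full unload/load cycle so that new
 * settings (such as MTU, features or loopback mode) take effect;
 * otherwise do nothing and let the next load pick them up.
 */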
2796static int bnx2x_reload_if_running(struct net_device *dev)
2797{
2798 struct bnx2x *bp = netdev_priv(dev);
2799
2800 if (unlikely(!netif_running(dev)))
2801 return 0;
2802
2803 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2804 return bnx2x_nic_load(bp, LOAD_NORMAL);
2805}
2806
1ac9e428
YR
2807int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
2808{
2809 u32 sel_phy_idx = 0;
2810 if (bp->link_params.num_phys <= 1)
2811 return INT_PHY;
2812
2813 if (bp->link_vars.link_up) {
2814 sel_phy_idx = EXT_PHY1;
2815 /* In case link is SERDES, check if the EXT_PHY2 is the one */
2816 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
2817 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
2818 sel_phy_idx = EXT_PHY2;
2819 } else {
2820
2821 switch (bnx2x_phy_selection(&bp->link_params)) {
2822 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2823 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
2824 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2825 sel_phy_idx = EXT_PHY1;
2826 break;
2827 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
2828 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2829 sel_phy_idx = EXT_PHY2;
2830 break;
2831 }
2832 }
2833
2834 return sel_phy_idx;
2835
2836}
2837int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
2838{
2839 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
2840 /*
 2841	 * The selected active PHY index is always the one after swapping (in
 2842	 * case PHY swapping is enabled), so when swapping is enabled we need
 2843	 * to reverse the configuration.
2844 */
2845
2846 if (bp->link_params.multi_phy_config &
2847 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
2848 if (sel_phy_idx == EXT_PHY1)
2849 sel_phy_idx = EXT_PHY2;
2850 else if (sel_phy_idx == EXT_PHY2)
2851 sel_phy_idx = EXT_PHY1;
2852 }
2853 return LINK_CONFIG_IDX(sel_phy_idx);
2854}
2855
9f6c9258
DK
2856/* called with rtnl_lock */
2857int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2858{
2859 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
2860
2861 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2862 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2863 return -EAGAIN;
2864 }
2865
2866 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2867 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2868 return -EINVAL;
2869
2870 /* This does not race with packet allocation
2871 * because the actual alloc size is
2872 * only updated as part of load
2873 */
2874 dev->mtu = new_mtu;
2875
66371c44
MM
2876 return bnx2x_reload_if_running(dev);
2877}
2878
2879u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2880{
2881 struct bnx2x *bp = netdev_priv(dev);
2882
2883 /* TPA requires Rx CSUM offloading */
2884 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2885 features &= ~NETIF_F_LRO;
2886
2887 return features;
2888}
2889
2890int bnx2x_set_features(struct net_device *dev, u32 features)
2891{
2892 struct bnx2x *bp = netdev_priv(dev);
2893 u32 flags = bp->flags;
538dd2e3 2894 bool bnx2x_reload = false;
66371c44
MM
2895
2896 if (features & NETIF_F_LRO)
2897 flags |= TPA_ENABLE_FLAG;
2898 else
2899 flags &= ~TPA_ENABLE_FLAG;
2900
538dd2e3
MB
2901 if (features & NETIF_F_LOOPBACK) {
2902 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2903 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2904 bnx2x_reload = true;
2905 }
2906 } else {
2907 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2908 bp->link_params.loopback_mode = LOOPBACK_NONE;
2909 bnx2x_reload = true;
2910 }
2911 }
2912
66371c44
MM
2913 if (flags ^ bp->flags) {
2914 bp->flags = flags;
538dd2e3
MB
2915 bnx2x_reload = true;
2916 }
66371c44 2917
538dd2e3 2918 if (bnx2x_reload) {
66371c44
MM
2919 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2920 return bnx2x_reload_if_running(dev);
2921 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
2922 }
2923
66371c44 2924 return 0;
9f6c9258
DK
2925}
2926
2927void bnx2x_tx_timeout(struct net_device *dev)
2928{
2929 struct bnx2x *bp = netdev_priv(dev);
2930
2931#ifdef BNX2X_STOP_ON_ERROR
2932 if (!bp->panic)
2933 bnx2x_panic();
2934#endif
2935 /* This allows the netif to be shutdown gracefully before resetting */
2936 schedule_delayed_work(&bp->reset_task, 0);
2937}
2938
9f6c9258
DK
2939int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2940{
2941 struct net_device *dev = pci_get_drvdata(pdev);
2942 struct bnx2x *bp;
2943
2944 if (!dev) {
2945 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2946 return -ENODEV;
2947 }
2948 bp = netdev_priv(dev);
2949
2950 rtnl_lock();
2951
2952 pci_save_state(pdev);
2953
2954 if (!netif_running(dev)) {
2955 rtnl_unlock();
2956 return 0;
2957 }
2958
2959 netif_device_detach(dev);
2960
2961 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2962
2963 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2964
2965 rtnl_unlock();
2966
2967 return 0;
2968}
2969
2970int bnx2x_resume(struct pci_dev *pdev)
2971{
2972 struct net_device *dev = pci_get_drvdata(pdev);
2973 struct bnx2x *bp;
2974 int rc;
2975
2976 if (!dev) {
2977 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2978 return -ENODEV;
2979 }
2980 bp = netdev_priv(dev);
2981
2982 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2983 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2984 return -EAGAIN;
2985 }
2986
2987 rtnl_lock();
2988
2989 pci_restore_state(pdev);
2990
2991 if (!netif_running(dev)) {
2992 rtnl_unlock();
2993 return 0;
2994 }
2995
2996 bnx2x_set_power_state(bp, PCI_D0);
2997 netif_device_attach(dev);
2998
f2e0899f
DK
2999 /* Since the chip was reset, clear the FW sequence number */
3000 bp->fw_seq = 0;
9f6c9258
DK
3001 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3002
3003 rtnl_unlock();
3004
3005 return rc;
3006}