bnx2x: fix link notification
drivers/net/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/ip.h>
21#include <net/ipv6.h>
22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
24#include "bnx2x_cmn.h"
25
26#include "bnx2x_init.h"
27
28static int bnx2x_setup_irqs(struct bnx2x *bp);
29
30/* free skb in the packet ring at pos idx
31 * return idx of last bd freed
32 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43 /* prefetch skb end pointer to speedup dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
53 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
54
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
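 /* nbd taken from the start BD counts all BDs of this packet; one is
 * dropped here for the start BD just unmapped, and the parse BD (plus
 * the TSO split header BD, if any) is skipped further down since those
 * carry no DMA mapping of their own.
 */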
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
96
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
117 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
118 " pkt_cons %u\n",
119 fp->index, hw_cons, sw_cons, pkt_cons);
120
121 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122 sw_cons++;
123 }
124
125 fp->tx_pkt_cons = sw_cons;
126 fp->tx_bd_cons = bd_cons;
127
128 /* Need to make the tx_bd_cons update visible to start_xmit()
129 * before checking for netif_tx_queue_stopped(). Without the
130 * memory barrier, there is a small possibility that
131 * start_xmit() will miss it and cause the queue to be stopped
132 * forever.
133 */
134 smp_mb();
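 /* This barrier is intended to pair with the corresponding barrier on
 * the xmit path after the queue is stopped, so that either this side
 * sees the stopped queue or bnx2x_start_xmit() sees the new tx_bd_cons.
 */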
135
136 if (unlikely(netif_tx_queue_stopped(txq))) {
137 /* Taking tx_lock() is needed to prevent reenabling the queue
138 * while it's empty. This could happen if rx_action() gets
139 * suspended in bnx2x_tx_int() after the condition before
140 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141 *
142 * stops the queue->sees fresh tx_bd_cons->releases the queue->
143 * sends some packets consuming the whole queue again->
144 * stops the queue
145 */
146
147 __netif_tx_lock(txq, smp_processor_id());
148
149 if ((netif_tx_queue_stopped(txq)) &&
150 (bp->state == BNX2X_STATE_OPEN) &&
151 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152 netif_tx_wake_queue(txq);
153
154 __netif_tx_unlock(txq);
155 }
156 return 0;
157}
158
159static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160 u16 idx)
161{
162 u16 last_max = fp->last_max_sge;
163
164 if (SUB_S16(idx, last_max) > 0)
165 fp->last_max_sge = idx;
166}
167
168static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169 struct eth_fast_path_rx_cqe *fp_cqe)
170{
171 struct bnx2x *bp = fp->bp;
172 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173 le16_to_cpu(fp_cqe->len_on_bd)) >>
174 SGE_PAGE_SHIFT;
175 u16 last_max, last_elem, first_elem;
176 u16 delta = 0;
177 u16 i;
178
179 if (!sge_len)
180 return;
181
182 /* First mark all used pages */
183 for (i = 0; i < sge_len; i++)
184 SGE_MASK_CLEAR_BIT(fp,
185 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
186
187 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
188 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
189
190 /* Here we assume that the last SGE index is the biggest */
191 prefetch((void *)(fp->sge_mask));
192 bnx2x_update_last_max_sge(fp,
193 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
194
195 last_max = RX_SGE(fp->last_max_sge);
196 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199 /* If ring is not full */
200 if (last_elem + 1 != first_elem)
201 last_elem++;
202
203 /* Now update the prod */
204 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205 if (likely(fp->sge_mask[i]))
206 break;
207
208 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209 delta += RX_SGE_MASK_ELEM_SZ;
210 }
211
212 if (delta > 0) {
213 fp->rx_sge_prod += delta;
214 /* clear page-end entries */
215 bnx2x_clear_sge_mask_next_elems(fp);
216 }
217
218 DP(NETIF_MSG_RX_STATUS,
219 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
220 fp->last_max_sge, fp->rx_sge_prod);
221}
222
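/* bnx2x_tpa_start: open a TPA aggregation bin.
 * The pre-allocated empty skb kept in tpa_pool[queue] is mapped and placed
 * at the producer slot, while the skb that was sitting at the consumer index
 * (holding the first part of the aggregated packet) is parked in the pool
 * until bnx2x_tpa_stop() completes the aggregation and passes it up the stack.
 */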
223static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224 struct sk_buff *skb, u16 cons, u16 prod)
225{
226 struct bnx2x *bp = fp->bp;
227 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230 dma_addr_t mapping;
231
232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
235 bp->rx_buf_size, DMA_FROM_DEVICE);
236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238 /* move partial skb from cons to pool (don't unmap yet) */
239 fp->tpa_pool[queue] = *cons_rx_buf;
240
241 /* mark bin state as start - print error if current state != stop */
242 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245 fp->tpa_state[queue] = BNX2X_TPA_START;
246
247 /* point prod_bd to new skb */
248 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251#ifdef BNX2X_STOP_ON_ERROR
252 fp->tpa_queue_used |= (1 << queue);
253#ifdef _ASM_GENERIC_INT_L64_H
254 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255#else
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257#endif
258 fp->tpa_queue_used);
259#endif
260}
261
262/* Timestamp option length allowed for TPA aggregation:
263 *
264 * nop nop kind length echo val
265 */
266#define TPA_TSTAMP_OPT_LEN 12
267/**
268 * Calculate the approximate value of the MSS for this
269 * aggregation using its first packet.
270 *
271 * @param bp
272 * @param parsing_flags Parsing flags from the START CQE
273 * @param len_on_bd Total length of the first packet for the
274 * aggregation.
275 */
276static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277 u16 len_on_bd)
278{
279 /* TPA aggregation won't have IP options or TCP options
280 * other than the timestamp.
281 */
282 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
283
284
285 /* Check if there was a TCP timestamp; if there is, it will
286 * always be 12 bytes long: nop nop kind length echo val.
287 *
288 * Otherwise FW would close the aggregation.
289 */
290 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
291 hdrs_len += TPA_TSTAMP_OPT_LEN;
292
293 return len_on_bd - hdrs_len;
294}
295
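/* bnx2x_fill_frag_skb: walk the SGL of an aggregated packet and attach the
 * SGE pages to the skb as fragments, replacing each consumed page with a
 * freshly allocated one. gso_size is set from the estimated MSS so the stack
 * can re-segment the aggregated frame (needed for forwarding).
 */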
296static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
297 struct sk_buff *skb,
298 struct eth_fast_path_rx_cqe *fp_cqe,
299 u16 cqe_idx, u16 parsing_flags)
300{
301 struct sw_rx_page *rx_pg, old_rx_pg;
302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
303 u32 i, frag_len, frag_size, pages;
304 int err;
305 int j;
306
307 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
308 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
309
310 /* This is needed in order to enable forwarding support */
311 if (frag_size)
312 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
313 len_on_bd);
314
315#ifdef BNX2X_STOP_ON_ERROR
316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
317 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
318 pages, cqe_idx);
319 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
320 fp_cqe->pkt_len, len_on_bd);
321 bnx2x_panic();
322 return -EINVAL;
323 }
324#endif
325
326 /* Run through the SGL and compose the fragmented skb */
327 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
328 u16 sge_idx =
329 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
330
331 /* FW gives the indices of the SGE as if the ring is an array
332 (meaning that "next" element will consume 2 indices) */
333 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
334 rx_pg = &fp->rx_page_ring[sge_idx];
335 old_rx_pg = *rx_pg;
336
337 /* If we fail to allocate a substitute page, we simply stop
338 where we are and drop the whole packet */
339 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
340 if (unlikely(err)) {
341 fp->eth_q_stats.rx_skb_alloc_failed++;
342 return err;
343 }
344
345 /* Unmap the page as we are going to pass it to the stack */
346 dma_unmap_page(&bp->pdev->dev,
347 dma_unmap_addr(&old_rx_pg, mapping),
348 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
349
350 /* Add one frag and update the appropriate fields in the skb */
351 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
352
353 skb->data_len += frag_len;
354 skb->truesize += frag_len;
355 skb->len += frag_len;
356
357 frag_size -= frag_len;
358 }
359
360 return 0;
361}
362
363static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
364 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
365 u16 cqe_idx)
366{
367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
368 struct sk_buff *skb = rx_buf->skb;
369 /* alloc new skb */
370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
371
372 /* Unmap skb in the pool anyway, as we are going to change
373 pool entry status to BNX2X_TPA_STOP even if new skb allocation
374 fails. */
375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
376 bp->rx_buf_size, DMA_FROM_DEVICE);
377
378 if (likely(new_skb)) {
379 /* fix ip xsum and give it to the stack */
380 /* (no need to map the new skb) */
381 u16 parsing_flags =
382 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
383
384 prefetch(skb);
385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
386
387#ifdef BNX2X_STOP_ON_ERROR
388 if (pad + len > bp->rx_buf_size) {
389 BNX2X_ERR("skb_put is about to fail... "
390 "pad %d len %d rx_buf_size %d\n",
391 pad, len, bp->rx_buf_size);
392 bnx2x_panic();
393 return;
394 }
395#endif
396
397 skb_reserve(skb, pad);
398 skb_put(skb, len);
399
400 skb->protocol = eth_type_trans(skb, bp->dev);
401 skb->ip_summed = CHECKSUM_UNNECESSARY;
402
403 {
404 struct iphdr *iph;
405
406 iph = (struct iphdr *)skb->data;
407 iph->check = 0;
408 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
409 }
410
411 if (!bnx2x_fill_frag_skb(bp, fp, skb,
412 &cqe->fast_path_cqe, cqe_idx,
413 parsing_flags)) {
414 if (parsing_flags & PARSING_FLAGS_VLAN)
415 __vlan_hwaccel_put_tag(skb,
416 le16_to_cpu(cqe->fast_path_cqe.
417 vlan_tag));
418 napi_gro_receive(&fp->napi, skb);
419 } else {
420 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
421 " - dropping packet!\n");
422 dev_kfree_skb(skb);
423 }
424
425
426 /* put new skb in bin */
427 fp->tpa_pool[queue].skb = new_skb;
428
429 } else {
430 /* else drop the packet and keep the buffer in the bin */
431 DP(NETIF_MSG_RX_STATUS,
432 "Failed to allocate new skb - dropping packet!\n");
433 fp->eth_q_stats.rx_skb_alloc_failed++;
434 }
435
436 fp->tpa_state[queue] = BNX2X_TPA_STOP;
437}
438
439/* Set Toeplitz hash value in the skb using the value from the
440 * CQE (calculated by HW).
441 */
442static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
443 struct sk_buff *skb)
444{
445 /* Set Toeplitz hash from CQE */
446 if ((bp->dev->features & NETIF_F_RXHASH) &&
447 (cqe->fast_path_cqe.status_flags &
448 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
449 skb->rxhash =
450 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
451}
452
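/* bnx2x_rx_int: main RX completion loop. Walks the RCQ between the driver's
 * and the hardware's completion consumer, hands slow-path CQEs to
 * bnx2x_sp_event(), handles TPA start/stop CQEs, copies small packets when
 * the MTU exceeds the standard packet size, and finally republishes the BD,
 * CQE and SGE producers to the chip.
 */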
453int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
454{
455 struct bnx2x *bp = fp->bp;
456 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
457 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
458 int rx_pkt = 0;
459
460#ifdef BNX2X_STOP_ON_ERROR
461 if (unlikely(bp->panic))
462 return 0;
463#endif
464
465 /* CQ "next element" is of the size of the regular element,
466 that's why it's ok here */
467 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
468 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
469 hw_comp_cons++;
470
471 bd_cons = fp->rx_bd_cons;
472 bd_prod = fp->rx_bd_prod;
473 bd_prod_fw = bd_prod;
474 sw_comp_cons = fp->rx_comp_cons;
475 sw_comp_prod = fp->rx_comp_prod;
476
477 /* Memory barrier necessary as speculative reads of the rx
478 * buffer can be ahead of the index in the status block
479 */
480 rmb();
481
482 DP(NETIF_MSG_RX_STATUS,
483 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
484 fp->index, hw_comp_cons, sw_comp_cons);
485
486 while (sw_comp_cons != hw_comp_cons) {
487 struct sw_rx_bd *rx_buf = NULL;
488 struct sk_buff *skb;
489 union eth_rx_cqe *cqe;
490 u8 cqe_fp_flags;
491 u16 len, pad;
492
493 comp_ring_cons = RCQ_BD(sw_comp_cons);
494 bd_prod = RX_BD(bd_prod);
495 bd_cons = RX_BD(bd_cons);
496
497 /* Prefetch the page containing the BD descriptor
498 at producer's index. It will be needed when new skb is
499 allocated */
500 prefetch((void *)(PAGE_ALIGN((unsigned long)
501 (&fp->rx_desc_ring[bd_prod])) -
502 PAGE_SIZE + 1));
503
504 cqe = &fp->rx_comp_ring[comp_ring_cons];
505 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
506
507 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
508 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
509 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
510 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
511 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
512 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
513
514 /* is this a slowpath msg? */
515 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
516 bnx2x_sp_event(fp, cqe);
517 goto next_cqe;
518
519 /* this is an rx packet */
520 } else {
521 rx_buf = &fp->rx_buf_ring[bd_cons];
522 skb = rx_buf->skb;
523 prefetch(skb);
524 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
525 pad = cqe->fast_path_cqe.placement_offset;
526
527 /* - If CQE is marked both TPA_START and TPA_END it is
528 * a non-TPA CQE.
529 * - An FP CQE will always have the TPA_START and/or
530 * TPA_STOP flag set.
531 */
532 if ((!fp->disable_tpa) &&
533 (TPA_TYPE(cqe_fp_flags) !=
534 (TPA_TYPE_START | TPA_TYPE_END))) {
535 u16 queue = cqe->fast_path_cqe.queue_index;
536
537 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
538 DP(NETIF_MSG_RX_STATUS,
539 "calling tpa_start on queue %d\n",
540 queue);
541
542 bnx2x_tpa_start(fp, queue, skb,
543 bd_cons, bd_prod);
544
545 /* Set Toeplitz hash for an LRO skb */
546 bnx2x_set_skb_rxhash(bp, cqe, skb);
547
548 goto next_rx;
549 } else { /* TPA_STOP */
550 DP(NETIF_MSG_RX_STATUS,
551 "calling tpa_stop on queue %d\n",
552 queue);
553
554 if (!BNX2X_RX_SUM_FIX(cqe))
555 BNX2X_ERR("STOP on non-TCP "
556 "data\n");
557
558 /* This is a size of the linear data
559 on this skb */
560 len = le16_to_cpu(cqe->fast_path_cqe.
561 len_on_bd);
562 bnx2x_tpa_stop(bp, fp, queue, pad,
563 len, cqe, comp_ring_cons);
564#ifdef BNX2X_STOP_ON_ERROR
565 if (bp->panic)
566 return 0;
567#endif
568
569 bnx2x_update_sge_prod(fp,
570 &cqe->fast_path_cqe);
571 goto next_cqe;
572 }
573 }
574
575 dma_sync_single_for_device(&bp->pdev->dev,
576 dma_unmap_addr(rx_buf, mapping),
577 pad + RX_COPY_THRESH,
578 DMA_FROM_DEVICE);
579 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
580
581 /* is this an error packet? */
582 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR flags %x rx packet %u\n",
585 cqe_fp_flags, sw_comp_cons);
586 fp->eth_q_stats.rx_err_discard_pkt++;
587 goto reuse_rx;
588 }
589
590 /* Since we don't have a jumbo ring
591 * copy small packets if mtu > 1500
592 */
593 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
594 (len <= RX_COPY_THRESH)) {
595 struct sk_buff *new_skb;
596
597 new_skb = netdev_alloc_skb(bp->dev,
598 len + pad);
599 if (new_skb == NULL) {
600 DP(NETIF_MSG_RX_ERR,
601 "ERROR packet dropped "
602 "because of alloc failure\n");
603 fp->eth_q_stats.rx_skb_alloc_failed++;
604 goto reuse_rx;
605 }
606
607 /* aligned copy */
608 skb_copy_from_linear_data_offset(skb, pad,
609 new_skb->data + pad, len);
610 skb_reserve(new_skb, pad);
611 skb_put(new_skb, len);
612
613 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
614
615 skb = new_skb;
616
617 } else
618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
619 dma_unmap_single(&bp->pdev->dev,
620 dma_unmap_addr(rx_buf, mapping),
621 bp->rx_buf_size,
622 DMA_FROM_DEVICE);
623 skb_reserve(skb, pad);
624 skb_put(skb, len);
625
626 } else {
627 DP(NETIF_MSG_RX_ERR,
628 "ERROR packet dropped because "
629 "of alloc failure\n");
630 fp->eth_q_stats.rx_skb_alloc_failed++;
631reuse_rx:
632 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
633 goto next_rx;
634 }
635
636 skb->protocol = eth_type_trans(skb, bp->dev);
637
638 /* Set Toeplitz hash for a non-LRO skb */
639 bnx2x_set_skb_rxhash(bp, cqe, skb);
640
641 skb_checksum_none_assert(skb);
642
643 if (bp->rx_csum) {
644 if (likely(BNX2X_RX_CSUM_OK(cqe)))
645 skb->ip_summed = CHECKSUM_UNNECESSARY;
646 else
647 fp->eth_q_stats.hw_csum_err++;
648 }
649 }
650
651 skb_record_rx_queue(skb, fp->index);
652
653 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
654 PARSING_FLAGS_VLAN)
655 __vlan_hwaccel_put_tag(skb,
656 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
657 napi_gro_receive(&fp->napi, skb);
658
659
660next_rx:
661 rx_buf->skb = NULL;
662
663 bd_cons = NEXT_RX_IDX(bd_cons);
664 bd_prod = NEXT_RX_IDX(bd_prod);
665 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
666 rx_pkt++;
667next_cqe:
668 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
669 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
670
671 if (rx_pkt == budget)
672 break;
673 } /* while */
674
675 fp->rx_bd_cons = bd_cons;
676 fp->rx_bd_prod = bd_prod_fw;
677 fp->rx_comp_cons = sw_comp_cons;
678 fp->rx_comp_prod = sw_comp_prod;
679
680 /* Update producers */
681 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
682 fp->rx_sge_prod);
683
684 fp->rx_pkt += rx_pkt;
685 fp->rx_calls++;
686
687 return rx_pkt;
688}
689
690static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
691{
692 struct bnx2x_fastpath *fp = fp_cookie;
693 struct bnx2x *bp = fp->bp;
694
695 /* Return here if interrupt is disabled */
696 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
697 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
698 return IRQ_HANDLED;
699 }
700
701 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
702 "[fp %d fw_sd %d igusb %d]\n",
703 fp->index, fp->fw_sb_id, fp->igu_sb_id);
704 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
705
706#ifdef BNX2X_STOP_ON_ERROR
707 if (unlikely(bp->panic))
708 return IRQ_HANDLED;
709#endif
710
711 /* Handle Rx and Tx according to MSI-X vector */
712 prefetch(fp->rx_cons_sb);
713 prefetch(fp->tx_cons_sb);
714 prefetch(&fp->sb_running_index[SM_RX_ID]);
715 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
716
717 return IRQ_HANDLED;
718}
719
720/* HW Lock for shared dual port PHYs */
721void bnx2x_acquire_phy_lock(struct bnx2x *bp)
722{
723 mutex_lock(&bp->port.phy_mutex);
724
725 if (bp->port.need_hw_lock)
726 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
727}
728
729void bnx2x_release_phy_lock(struct bnx2x *bp)
730{
731 if (bp->port.need_hw_lock)
732 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
733
734 mutex_unlock(&bp->port.phy_mutex);
735}
736
737/* calculates MF speed according to current linespeed and MF configuration */
738u16 bnx2x_get_mf_speed(struct bnx2x *bp)
739{
740 u16 line_speed = bp->link_vars.line_speed;
741 if (IS_MF(bp)) {
742 u16 maxCfg = bnx2x_extract_max_cfg(bp,
743 bp->mf_config[BP_VN(bp)]);
744
745 /* Calculate the current MAX line speed limit for the MF
746 * devices
747 */
748 if (IS_MF_SI(bp))
749 line_speed = (line_speed * maxCfg) / 100;
750 else { /* SD mode */
751 u16 vn_max_rate = maxCfg * 100;
752
753 if (vn_max_rate < line_speed)
754 line_speed = vn_max_rate;
755 }
756 }
757
758 return line_speed;
759}
760
761void bnx2x_link_report(struct bnx2x *bp)
762{
763 if (bp->flags & MF_FUNC_DIS) {
764 netif_carrier_off(bp->dev);
765 netdev_err(bp->dev, "NIC Link is Down\n");
766 return;
767 }
768
769 if (bp->link_vars.link_up) {
770 u16 line_speed;
771
772 if (bp->state == BNX2X_STATE_OPEN)
773 netif_carrier_on(bp->dev);
774 netdev_info(bp->dev, "NIC Link is Up, ");
775
776 line_speed = bnx2x_get_mf_speed(bp);
777
778 pr_cont("%d Mbps ", line_speed);
779
780 if (bp->link_vars.duplex == DUPLEX_FULL)
781 pr_cont("full duplex");
782 else
783 pr_cont("half duplex");
784
785 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
786 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
787 pr_cont(", receive ");
788 if (bp->link_vars.flow_ctrl &
789 BNX2X_FLOW_CTRL_TX)
790 pr_cont("& transmit ");
791 } else {
792 pr_cont(", transmit ");
793 }
794 pr_cont("flow control ON");
795 }
796 pr_cont("\n");
797
798 } else { /* link_down */
799 netif_carrier_off(bp->dev);
800 netdev_err(bp->dev, "NIC Link is Down\n");
801 }
802}
803
804/* Returns the number of actually allocated BDs */
805static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
806 int rx_ring_size)
807{
808 struct bnx2x *bp = fp->bp;
809 u16 ring_prod, cqe_ring_prod;
810 int i;
811
812 fp->rx_comp_cons = 0;
813 cqe_ring_prod = ring_prod = 0;
814 for (i = 0; i < rx_ring_size; i++) {
815 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
816 BNX2X_ERR("was only able to allocate "
817 "%d rx skbs on queue[%d]\n", i, fp->index);
818 fp->eth_q_stats.rx_skb_alloc_failed++;
819 break;
820 }
821 ring_prod = NEXT_RX_IDX(ring_prod);
822 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
823 WARN_ON(ring_prod <= i);
824 }
825
826 fp->rx_bd_prod = ring_prod;
827 /* Limit the CQE producer by the CQE ring size */
828 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
829 cqe_ring_prod);
830 fp->rx_pkt = fp->rx_calls = 0;
831
832 return i;
833}
834
835static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
836{
837 struct bnx2x *bp = fp->bp;
838 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
839 MAX_RX_AVAIL/bp->num_queues;
840
841 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
842
843 bnx2x_alloc_rx_bds(fp, rx_ring_size);
844
845 /* Warning!
846 * This will generate an interrupt (to the TSTORM);
847 * it must only be done after the chip is initialized.
848 */
849 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
850 fp->rx_sge_prod);
851}
852
853void bnx2x_init_rx_rings(struct bnx2x *bp)
854{
855 int func = BP_FUNC(bp);
856 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
857 ETH_MAX_AGGREGATION_QUEUES_E1H;
858 u16 ring_prod;
859 int i, j;
860
861 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
862 IP_HEADER_ALIGNMENT_PADDING;
863
864 DP(NETIF_MSG_IFUP,
865 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
866
867 for_each_rx_queue(bp, j) {
868 struct bnx2x_fastpath *fp = &bp->fp[j];
869
870 if (!fp->disable_tpa) {
871 for (i = 0; i < max_agg_queues; i++) {
872 fp->tpa_pool[i].skb =
873 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
874 if (!fp->tpa_pool[i].skb) {
875 BNX2X_ERR("Failed to allocate TPA "
876 "skb pool for queue[%d] - "
877 "disabling TPA on this "
878 "queue!\n", j);
879 bnx2x_free_tpa_pool(bp, fp, i);
880 fp->disable_tpa = 1;
881 break;
882 }
883 dma_unmap_addr_set((struct sw_rx_bd *)
884 &bp->fp->tpa_pool[i],
885 mapping, 0);
886 fp->tpa_state[i] = BNX2X_TPA_STOP;
887 }
888
889 /* "next page" elements initialization */
890 bnx2x_set_next_page_sgl(fp);
891
892 /* set SGEs bit mask */
893 bnx2x_init_sge_ring_bit_mask(fp);
894
895 /* Allocate SGEs and initialize the ring elements */
896 for (i = 0, ring_prod = 0;
897 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
898
899 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
900 BNX2X_ERR("was only able to allocate "
901 "%d rx sges\n", i);
902 BNX2X_ERR("disabling TPA for"
903 " queue[%d]\n", j);
904 /* Cleanup already allocated elements */
905 bnx2x_free_rx_sge_range(bp,
906 fp, ring_prod);
907 bnx2x_free_tpa_pool(bp,
908 fp, max_agg_queues);
909 fp->disable_tpa = 1;
910 ring_prod = 0;
911 break;
912 }
913 ring_prod = NEXT_SGE_IDX(ring_prod);
914 }
915
916 fp->rx_sge_prod = ring_prod;
917 }
918 }
919
920 for_each_rx_queue(bp, j) {
921 struct bnx2x_fastpath *fp = &bp->fp[j];
922
923 fp->rx_bd_cons = 0;
924
925 bnx2x_set_next_page_rx_bd(fp);
926
927 /* CQ ring */
928 bnx2x_set_next_page_rx_cq(fp);
929
930 /* Allocate BDs and initialize BD ring */
931 bnx2x_alloc_rx_bd_ring(fp);
932
933 if (j != 0)
934 continue;
935
936 if (!CHIP_IS_E2(bp)) {
937 REG_WR(bp, BAR_USTRORM_INTMEM +
938 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
939 U64_LO(fp->rx_comp_mapping));
940 REG_WR(bp, BAR_USTRORM_INTMEM +
941 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
942 U64_HI(fp->rx_comp_mapping));
943 }
944 }
945}
946
947static void bnx2x_free_tx_skbs(struct bnx2x *bp)
948{
949 int i;
950
951 for_each_tx_queue(bp, i) {
952 struct bnx2x_fastpath *fp = &bp->fp[i];
953
954 u16 bd_cons = fp->tx_bd_cons;
955 u16 sw_prod = fp->tx_pkt_prod;
956 u16 sw_cons = fp->tx_pkt_cons;
957
958 while (sw_cons != sw_prod) {
959 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
960 sw_cons++;
961 }
962 }
963}
964
965static void bnx2x_free_rx_skbs(struct bnx2x *bp)
966{
967 int i, j;
968
969 for_each_rx_queue(bp, j) {
970 struct bnx2x_fastpath *fp = &bp->fp[j];
971
972 for (i = 0; i < NUM_RX_BD; i++) {
973 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
974 struct sk_buff *skb = rx_buf->skb;
975
976 if (skb == NULL)
977 continue;
978
979 dma_unmap_single(&bp->pdev->dev,
980 dma_unmap_addr(rx_buf, mapping),
981 bp->rx_buf_size, DMA_FROM_DEVICE);
982
983 rx_buf->skb = NULL;
984 dev_kfree_skb(skb);
985 }
986 if (!fp->disable_tpa)
987 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
988 ETH_MAX_AGGREGATION_QUEUES_E1 :
989 ETH_MAX_AGGREGATION_QUEUES_E1H);
990 }
991}
992
993void bnx2x_free_skbs(struct bnx2x *bp)
994{
995 bnx2x_free_tx_skbs(bp);
996 bnx2x_free_rx_skbs(bp);
997}
998
999static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1000{
1001 int i, offset = 1;
1002
1003 free_irq(bp->msix_table[0].vector, bp->dev);
1004 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1005 bp->msix_table[0].vector);
1006
1007#ifdef BCM_CNIC
1008 offset++;
1009#endif
1010 for_each_eth_queue(bp, i) {
1011 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
1012 "state %x\n", i, bp->msix_table[i + offset].vector,
1013 bnx2x_fp(bp, i, state));
1014
1015 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1016 }
1017}
1018
1019 void bnx2x_free_irq(struct bnx2x *bp)
1020 {
1021 if (bp->flags & USING_MSIX_FLAG)
1022 bnx2x_free_msix_irqs(bp);
1023 else if (bp->flags & USING_MSI_FLAG)
1024 free_irq(bp->pdev->irq, bp->dev);
1025 else
1026 free_irq(bp->pdev->irq, bp->dev);
1027}
1028
1029 int bnx2x_enable_msix(struct bnx2x *bp)
1030 {
1031 int msix_vec = 0, i, rc, req_cnt;
1032
1033 bp->msix_table[msix_vec].entry = msix_vec;
1034 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1035 bp->msix_table[0].entry);
1036 msix_vec++;
1037
1038#ifdef BCM_CNIC
1039 bp->msix_table[msix_vec].entry = msix_vec;
1040 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1041 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1042 msix_vec++;
1043#endif
1044 for_each_eth_queue(bp, i) {
1045 bp->msix_table[msix_vec].entry = msix_vec;
1046 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1047 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1048 msix_vec++;
1049 }
1050
1051 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1052
1053 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1054
1055 /*
1056 * reconfigure number of tx/rx queues according to available
1057 * MSI-X vectors
1058 */
1059 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1060 /* how many fewer vectors will we have? */
1061 int diff = req_cnt - rc;
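 /* A positive return from pci_enable_msix() is the number of vectors
 * the platform can actually provide; retry with that count and shrink
 * the queue set by the difference.
 */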
1062
1063 DP(NETIF_MSG_IFUP,
1064 "Trying to use less MSI-X vectors: %d\n", rc);
1065
1066 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1067
1068 if (rc) {
1069 DP(NETIF_MSG_IFUP,
1070 "MSI-X is not attainable rc %d\n", rc);
1071 return rc;
1072 }
1073 /*
1074 * decrease number of queues by number of unallocated entries
1075 */
1076 bp->num_queues -= diff;
1077
1078 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1079 bp->num_queues);
1080 } else if (rc) {
1081 /* fall back to INTx if not enough memory */
1082 if (rc == -ENOMEM)
1083 bp->flags |= DISABLE_MSI_FLAG;
1084 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1085 return rc;
1086 }
1087
1088 bp->flags |= USING_MSIX_FLAG;
1089
1090 return 0;
1091}
1092
1093static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1094{
1095 int i, rc, offset = 1;
1096
1097 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1098 bp->dev->name, bp->dev);
1099 if (rc) {
1100 BNX2X_ERR("request sp irq failed\n");
1101 return -EBUSY;
1102 }
1103
1104#ifdef BCM_CNIC
1105 offset++;
1106#endif
1107 for_each_eth_queue(bp, i) {
1108 struct bnx2x_fastpath *fp = &bp->fp[i];
1109 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1110 bp->dev->name, i);
1111
1112 rc = request_irq(bp->msix_table[offset].vector,
1113 bnx2x_msix_fp_int, 0, fp->name, fp);
1114 if (rc) {
1115 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1116 bnx2x_free_msix_irqs(bp);
1117 return -EBUSY;
1118 }
1119
1120 offset++;
1121 fp->state = BNX2X_FP_STATE_IRQ;
1122 }
1123
1124 i = BNX2X_NUM_ETH_QUEUES(bp);
1125 offset = 1 + CNIC_CONTEXT_USE;
1126 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1127 " ... fp[%d] %d\n",
1128 bp->msix_table[0].vector,
1129 0, bp->msix_table[offset].vector,
1130 i - 1, bp->msix_table[offset + i - 1].vector);
1131
1132 return 0;
1133}
1134
1135 int bnx2x_enable_msi(struct bnx2x *bp)
1136{
1137 int rc;
1138
1139 rc = pci_enable_msi(bp->pdev);
1140 if (rc) {
1141 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1142 return -1;
1143 }
1144 bp->flags |= USING_MSI_FLAG;
1145
1146 return 0;
1147}
1148
1149static int bnx2x_req_irq(struct bnx2x *bp)
1150{
1151 unsigned long flags;
1152 int rc;
1153
1154 if (bp->flags & USING_MSI_FLAG)
1155 flags = 0;
1156 else
1157 flags = IRQF_SHARED;
1158
1159 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1160 bp->dev->name, bp->dev);
1161 if (!rc)
1162 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1163
1164 return rc;
1165}
1166
1167static void bnx2x_napi_enable(struct bnx2x *bp)
1168{
1169 int i;
1170
1171 for_each_napi_queue(bp, i)
1172 napi_enable(&bnx2x_fp(bp, i, napi));
1173}
1174
1175static void bnx2x_napi_disable(struct bnx2x *bp)
1176{
1177 int i;
1178
1179 for_each_napi_queue(bp, i)
1180 napi_disable(&bnx2x_fp(bp, i, napi));
1181}
1182
1183void bnx2x_netif_start(struct bnx2x *bp)
1184{
1185 int intr_sem;
1186
1187 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1188 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1189
1190 if (intr_sem) {
1191 if (netif_running(bp->dev)) {
1192 bnx2x_napi_enable(bp);
1193 bnx2x_int_enable(bp);
1194 if (bp->state == BNX2X_STATE_OPEN)
1195 netif_tx_wake_all_queues(bp->dev);
1196 }
1197 }
1198}
1199
1200void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1201{
1202 bnx2x_int_disable_sync(bp, disable_hw);
1203 bnx2x_napi_disable(bp);
1204 netif_tx_disable(bp->dev);
1205}
1206
1207u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1208{
1209#ifdef BCM_CNIC
1210 struct bnx2x *bp = netdev_priv(dev);
1211 if (NO_FCOE(bp))
1212 return skb_tx_hash(dev, skb);
1213 else {
1214 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1215 u16 ether_type = ntohs(hdr->h_proto);
1216
1217 /* Skip VLAN tag if present */
1218 if (ether_type == ETH_P_8021Q) {
1219 struct vlan_ethhdr *vhdr =
1220 (struct vlan_ethhdr *)skb->data;
1221
1222 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1223 }
1224
1225 /* If ethertype is FCoE or FIP - use FCoE ring */
1226 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1227 return bnx2x_fcoe(bp, index);
1228 }
1229#endif
1230 /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1231 */
1232 return __skb_tx_hash(dev, skb,
1233 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1234}
1235
1236void bnx2x_set_num_queues(struct bnx2x *bp)
1237{
1238 switch (bp->multi_mode) {
1239 case ETH_RSS_MODE_DISABLED:
1240 bp->num_queues = 1;
1241 break;
1242 case ETH_RSS_MODE_REGULAR:
1243 bp->num_queues = bnx2x_calc_num_queues(bp);
1244 break;
1245
1246 default:
1247 bp->num_queues = 1;
1248 break;
1249 }
1250
1251 /* Add special queues */
1252 bp->num_queues += NONE_ETH_CONTEXT_USE;
1253}
1254
1255#ifdef BCM_CNIC
1256static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1257{
1258 if (!NO_FCOE(bp)) {
1259 if (!IS_MF_SD(bp))
1260 bnx2x_set_fip_eth_mac_addr(bp, 1);
1261 bnx2x_set_all_enode_macs(bp, 1);
1262 bp->flags |= FCOE_MACS_SET;
1263 }
1264 }
1265#endif
1266
1267static void bnx2x_release_firmware(struct bnx2x *bp)
1268{
1269 kfree(bp->init_ops_offsets);
1270 kfree(bp->init_ops);
1271 kfree(bp->init_data);
1272 release_firmware(bp->firmware);
1273}
1274
1275static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1276{
1277 int rc, num = bp->num_queues;
1278
1279#ifdef BCM_CNIC
1280 if (NO_FCOE(bp))
1281 num -= FCOE_CONTEXT_USE;
1282
1283#endif
1284 netif_set_real_num_tx_queues(bp->dev, num);
1285 rc = netif_set_real_num_rx_queues(bp->dev, num);
1286 return rc;
1287}
1288
1289/* must be called with rtnl_lock */
1290int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1291{
1292 u32 load_code;
1293 int i, rc;
1294
1295 /* Set init arrays */
1296 rc = bnx2x_init_firmware(bp);
1297 if (rc) {
1298 BNX2X_ERR("Error loading firmware\n");
1299 return rc;
1300 }
1301
1302#ifdef BNX2X_STOP_ON_ERROR
1303 if (unlikely(bp->panic))
1304 return -EPERM;
1305#endif
1306
1307 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1308
1309 /* must be called before memory allocation and HW init */
1310 bnx2x_ilt_set_info(bp);
1311
1312 if (bnx2x_alloc_mem(bp))
1313 return -ENOMEM;
1314
1315 rc = bnx2x_set_real_num_queues(bp);
1316 if (rc) {
1317 BNX2X_ERR("Unable to set real_num_queues\n");
1318 goto load_error0;
1319 }
1320
1321 for_each_queue(bp, i)
1322 bnx2x_fp(bp, i, disable_tpa) =
1323 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1324
1325#ifdef BCM_CNIC
1326 /* We don't want TPA on FCoE L2 ring */
1327 bnx2x_fcoe(bp, disable_tpa) = 1;
1328#endif
1329 bnx2x_napi_enable(bp);
1330
1331 /* Send LOAD_REQUEST command to MCP.
1332 Returns the type of LOAD command:
1333 if it is the first port to be initialized,
1334 common blocks should be initialized, otherwise not.
1335 */
1336 if (!BP_NOMCP(bp)) {
1337 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1338 if (!load_code) {
1339 BNX2X_ERR("MCP response failure, aborting\n");
1340 rc = -EBUSY;
1341 goto load_error1;
1342 }
1343 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1344 rc = -EBUSY; /* other port in diagnostic mode */
1345 goto load_error1;
1346 }
1347
1348 } else {
1349 int path = BP_PATH(bp);
1350 int port = BP_PORT(bp);
1351
1352 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1353 path, load_count[path][0], load_count[path][1],
1354 load_count[path][2]);
1355 load_count[path][0]++;
1356 load_count[path][1 + port]++;
1357 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1358 path, load_count[path][0], load_count[path][1],
1359 load_count[path][2]);
1360 if (load_count[path][0] == 1)
1361 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1362 else if (load_count[path][1 + port] == 1)
1363 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1364 else
1365 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1366 }
1367
1368 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1369 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1370 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1371 bp->port.pmf = 1;
1372 else
1373 bp->port.pmf = 0;
1374 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1375
1376 /* Initialize HW */
1377 rc = bnx2x_init_hw(bp, load_code);
1378 if (rc) {
1379 BNX2X_ERR("HW init failed, aborting\n");
1380 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1381 goto load_error2;
1382 }
1383
1384 /* Connect to IRQs */
1385 rc = bnx2x_setup_irqs(bp);
1386 if (rc) {
1387 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1388 goto load_error2;
1389 }
1390
1391 /* Setup NIC internals and enable interrupts */
1392 bnx2x_nic_init(bp, load_code);
1393
1394 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1395 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1396 (bp->common.shmem2_base))
1397 SHMEM2_WR(bp, dcc_support,
1398 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1399 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1400
1401 /* Send LOAD_DONE command to MCP */
1402 if (!BP_NOMCP(bp)) {
1403 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1404 if (!load_code) {
1405 BNX2X_ERR("MCP response failure, aborting\n");
1406 rc = -EBUSY;
1407 goto load_error3;
1408 }
1409 }
1410
1411 bnx2x_dcbx_init(bp);
1412
1413 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1414
1415 rc = bnx2x_func_start(bp);
1416 if (rc) {
1417 BNX2X_ERR("Function start failed!\n");
1418#ifndef BNX2X_STOP_ON_ERROR
1419 goto load_error3;
1420#else
1421 bp->panic = 1;
1422 return -EBUSY;
1423#endif
1424 }
1425
1426 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1427 if (rc) {
1428 BNX2X_ERR("Setup leading failed!\n");
1429#ifndef BNX2X_STOP_ON_ERROR
1430 goto load_error3;
1431#else
1432 bp->panic = 1;
1433 return -EBUSY;
1434#endif
1435 }
1436
1437 if (!CHIP_IS_E1(bp) &&
1438 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1439 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1440 bp->flags |= MF_FUNC_DIS;
1441 }
1442
1443#ifdef BCM_CNIC
1444 /* Enable Timer scan */
1445 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1446#endif
1447
1448 for_each_nondefault_queue(bp, i) {
1449 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1450 if (rc)
1451#ifdef BCM_CNIC
1452 goto load_error4;
1453#else
1454 goto load_error3;
1455#endif
1456 }
1457
1458 /* Now when Clients are configured we are ready to work */
1459 bp->state = BNX2X_STATE_OPEN;
1460
1461#ifdef BCM_CNIC
1462 bnx2x_set_fcoe_eth_macs(bp);
1463#endif
1464
1465 bnx2x_set_eth_mac(bp, 1);
1466
1467 if (bp->port.pmf)
1468 bnx2x_initial_phy_init(bp, load_mode);
1469
1470 /* Start fast path */
1471 switch (load_mode) {
1472 case LOAD_NORMAL:
1473 /* Tx queues should only be re-enabled */
1474 netif_tx_wake_all_queues(bp->dev);
1475 /* Initialize the receive filter. */
1476 bnx2x_set_rx_mode(bp->dev);
1477 break;
1478
1479 case LOAD_OPEN:
1480 netif_tx_start_all_queues(bp->dev);
1481 smp_mb__after_clear_bit();
1482 /* Initialize the receive filter. */
1483 bnx2x_set_rx_mode(bp->dev);
1484 break;
1485
1486 case LOAD_DIAG:
1487 /* Initialize the receive filter. */
1488 bnx2x_set_rx_mode(bp->dev);
1489 bp->state = BNX2X_STATE_DIAG;
1490 break;
1491
1492 default:
1493 break;
1494 }
1495
1496 if (!bp->port.pmf)
1497 bnx2x__link_status_update(bp);
1498
1499 /* start the timer */
1500 mod_timer(&bp->timer, jiffies + bp->current_interval);
1501
1502#ifdef BCM_CNIC
1503 bnx2x_setup_cnic_irq_info(bp);
1504 if (bp->state == BNX2X_STATE_OPEN)
1505 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1506#endif
1507 bnx2x_inc_load_cnt(bp);
1508
1509 bnx2x_release_firmware(bp);
1510
1511 return 0;
1512
1513#ifdef BCM_CNIC
1514load_error4:
1515 /* Disable Timer scan */
1516 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1517#endif
1518load_error3:
1519 bnx2x_int_disable_sync(bp, 1);
1520
1521 /* Free SKBs, SGEs, TPA pool and driver internals */
1522 bnx2x_free_skbs(bp);
1523 for_each_rx_queue(bp, i)
1524 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1525
1526 /* Release IRQs */
1527 bnx2x_free_irq(bp);
1528load_error2:
1529 if (!BP_NOMCP(bp)) {
1530 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1531 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1532 }
1533
1534 bp->port.pmf = 0;
1535load_error1:
1536 bnx2x_napi_disable(bp);
1537load_error0:
1538 bnx2x_free_mem(bp);
1539
1540 bnx2x_release_firmware(bp);
1541
1542 return rc;
1543}
1544
1545/* must be called with rtnl_lock */
1546int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1547{
1548 int i;
1549
1550 if (bp->state == BNX2X_STATE_CLOSED) {
1551 /* Interface has been removed - nothing to recover */
1552 bp->recovery_state = BNX2X_RECOVERY_DONE;
1553 bp->is_leader = 0;
1554 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1555 smp_wmb();
1556
1557 return -EINVAL;
1558 }
1559
1560#ifdef BCM_CNIC
1561 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1562#endif
1563 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1564
1565 /* Set "drop all" */
1566 bp->rx_mode = BNX2X_RX_MODE_NONE;
1567 bnx2x_set_storm_rx_mode(bp);
1568
1569 /* Stop Tx */
1570 bnx2x_tx_disable(bp);
1571
1572 del_timer_sync(&bp->timer);
1573
1574 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1575 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1576
1577 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1578
1579 /* Cleanup the chip if needed */
1580 if (unload_mode != UNLOAD_RECOVERY)
1581 bnx2x_chip_cleanup(bp, unload_mode);
1582 else {
1583 /* Disable HW interrupts, NAPI and Tx */
1584 bnx2x_netif_stop(bp, 1);
1585
1586 /* Release IRQs */
1587 bnx2x_free_irq(bp);
1588 }
1589
1590 bp->port.pmf = 0;
1591
1592 /* Free SKBs, SGEs, TPA pool and driver internals */
1593 bnx2x_free_skbs(bp);
1594 for_each_rx_queue(bp, i)
1595 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1596
1597 bnx2x_free_mem(bp);
1598
1599 bp->state = BNX2X_STATE_CLOSED;
1600
1601 /* The last driver must disable a "close the gate" if there is no
1602 * parity attention or "process kill" pending.
1603 */
1604 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1605 bnx2x_reset_is_done(bp))
1606 bnx2x_disable_close_the_gate(bp);
1607
1608 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1609 if (unload_mode == UNLOAD_RECOVERY)
1610 bp->fw_seq = 0;
1611
1612 return 0;
1613}
1614
1615int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1616{
1617 u16 pmcsr;
1618
1619 /* If there is no power capability, silently succeed */
1620 if (!bp->pm_cap) {
1621 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1622 return 0;
1623 }
1624
1625 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1626
1627 switch (state) {
1628 case PCI_D0:
1629 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1630 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1631 PCI_PM_CTRL_PME_STATUS));
1632
1633 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1634 /* delay required during transition out of D3hot */
1635 msleep(20);
1636 break;
1637
1638 case PCI_D3hot:
1639 /* If there are other clients above don't
1640 shut down the power */
1641 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1642 return 0;
1643 /* Don't shut down the power for emulation and FPGA */
1644 if (CHIP_REV_IS_SLOW(bp))
1645 return 0;
1646
1647 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1648 pmcsr |= 3;
1649
1650 if (bp->wol)
1651 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1652
1653 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1654 pmcsr);
1655
1656 /* No more memory access after this point until
1657 * device is brought back to D0.
1658 */
1659 break;
1660
1661 default:
1662 return -EINVAL;
1663 }
1664 return 0;
1665}
1666
1667/*
1668 * net_device service functions
1669 */
1670 int bnx2x_poll(struct napi_struct *napi, int budget)
1671{
1672 int work_done = 0;
1673 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1674 napi);
1675 struct bnx2x *bp = fp->bp;
1676
1677 while (1) {
1678#ifdef BNX2X_STOP_ON_ERROR
1679 if (unlikely(bp->panic)) {
1680 napi_complete(napi);
1681 return 0;
1682 }
1683#endif
1684
1685 if (bnx2x_has_tx_work(fp))
1686 bnx2x_tx_int(fp);
1687
1688 if (bnx2x_has_rx_work(fp)) {
1689 work_done += bnx2x_rx_int(fp, budget - work_done);
1690
1691 /* must not complete if we consumed full budget */
1692 if (work_done >= budget)
1693 break;
1694 }
1695
1696 /* Fall out from the NAPI loop if needed */
1697 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1698#ifdef BCM_CNIC
1699 /* No need to update SB for FCoE L2 ring as long as
1700 * it's connected to the default SB and the SB
1701 * has been updated when NAPI was scheduled.
1702 */
1703 if (IS_FCOE_FP(fp)) {
1704 napi_complete(napi);
1705 break;
1706 }
1707#endif
1708
1709 bnx2x_update_fpsb_idx(fp);
1710 /* bnx2x_has_rx_work() reads the status block,
1711 * thus we need to ensure that status block indices
1712 * have been actually read (bnx2x_update_fpsb_idx)
1713 * prior to this check (bnx2x_has_rx_work) so that
1714 * we won't write the "newer" value of the status block
1715 * to IGU (if there was a DMA right after
1716 * bnx2x_has_rx_work and if there is no rmb, the memory
1717 * reading (bnx2x_update_fpsb_idx) may be postponed
1718 * to right before bnx2x_ack_sb). In this case there
1719 * will never be another interrupt until there is
1720 * another update of the status block, while there
1721 * is still unhandled work.
1722 */
1723 rmb();
1724
1725 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1726 napi_complete(napi);
1727 /* Re-enable interrupts */
1728 DP(NETIF_MSG_HW,
1729 "Update index to %d\n", fp->fp_hc_idx);
1730 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1731 le16_to_cpu(fp->fp_hc_idx),
1732 IGU_INT_ENABLE, 1);
1733 break;
1734 }
1735 }
1736 }
1737
1738 return work_done;
1739}
1740
1741/* we split the first BD into header and data BDs
1742 * to ease the pain of our fellow microcode engineers;
1743 * we use one mapping for both BDs.
1744 * So far this has only been observed to happen
1745 * in Other Operating Systems(TM).
1746 */
1747static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1748 struct bnx2x_fastpath *fp,
1749 struct sw_tx_bd *tx_buf,
1750 struct eth_tx_start_bd **tx_bd, u16 hlen,
1751 u16 bd_prod, int nbd)
1752{
1753 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1754 struct eth_tx_bd *d_tx_bd;
1755 dma_addr_t mapping;
1756 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1757
1758 /* first fix first BD */
1759 h_tx_bd->nbd = cpu_to_le16(nbd);
1760 h_tx_bd->nbytes = cpu_to_le16(hlen);
1761
1762 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1763 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1764 h_tx_bd->addr_lo, h_tx_bd->nbd);
1765
1766 /* now get a new data BD
1767 * (after the pbd) and fill it */
1768 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1769 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1770
1771 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1772 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1773
1774 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1775 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1776 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1777
1778 /* this marks the BD as one that has no individual mapping */
1779 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1780
1781 DP(NETIF_MSG_TX_QUEUED,
1782 "TSO split data size is %d (%x:%x)\n",
1783 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1784
1785 /* update tx_bd */
1786 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1787
1788 return bd_prod;
1789}
1790
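/* bnx2x_csum_fix: adjust a partial checksum when the data the hardware
 * starts summing from differs from the transport header by 'fix' bytes;
 * the checksum of the skipped (or extra) region is folded out of (or into)
 * the value before it is byte-swapped for the parse BD.
 */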
1791static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1792{
1793 if (fix > 0)
1794 csum = (u16) ~csum_fold(csum_sub(csum,
1795 csum_partial(t_header - fix, fix, 0)));
1796
1797 else if (fix < 0)
1798 csum = (u16) ~csum_fold(csum_add(csum,
1799 csum_partial(t_header, -fix, 0)));
1800
1801 return swab16(csum);
1802}
1803
1804static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1805{
1806 u32 rc;
1807
1808 if (skb->ip_summed != CHECKSUM_PARTIAL)
1809 rc = XMIT_PLAIN;
1810
1811 else {
1812 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1813 rc = XMIT_CSUM_V6;
1814 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1815 rc |= XMIT_CSUM_TCP;
1816
1817 } else {
1818 rc = XMIT_CSUM_V4;
1819 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1820 rc |= XMIT_CSUM_TCP;
1821 }
1822 }
1823
1824 if (skb_is_gso_v6(skb))
1825 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1826 else if (skb_is_gso(skb))
1827 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1828
1829 return rc;
1830}
1831
1832#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1833/* check if packet requires linearization (packet is too fragmented)
1834 no need to check fragmentation if page size > 8K (there will be no
1835 violation of FW restrictions) */
1836static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1837 u32 xmit_type)
1838{
1839 int to_copy = 0;
1840 int hlen = 0;
1841 int first_bd_sz = 0;
1842
1843 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1844 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1845
1846 if (xmit_type & XMIT_GSO) {
1847 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1848 /* Check if LSO packet needs to be copied:
1849 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1850 int wnd_size = MAX_FETCH_BD - 3;
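 /* The check below slides a window of wnd_size frags across the
 * packet: if any window holds less than one MSS of payload, the FW
 * would apparently need more than MAX_FETCH_BD BDs for a single
 * segment, so the skb has to be linearized.
 */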
1851 /* Number of windows to check */
1852 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1853 int wnd_idx = 0;
1854 int frag_idx = 0;
1855 u32 wnd_sum = 0;
1856
1857 /* Headers length */
1858 hlen = (int)(skb_transport_header(skb) - skb->data) +
1859 tcp_hdrlen(skb);
1860
1861 /* Amount of data (w/o headers) on linear part of SKB*/
1862 first_bd_sz = skb_headlen(skb) - hlen;
1863
1864 wnd_sum = first_bd_sz;
1865
1866 /* Calculate the first sum - it's special */
1867 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1868 wnd_sum +=
1869 skb_shinfo(skb)->frags[frag_idx].size;
1870
1871 /* If there was data on linear skb data - check it */
1872 if (first_bd_sz > 0) {
1873 if (unlikely(wnd_sum < lso_mss)) {
1874 to_copy = 1;
1875 goto exit_lbl;
1876 }
1877
1878 wnd_sum -= first_bd_sz;
1879 }
1880
1881 /* Others are easier: run through the frag list and
1882 check all windows */
1883 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1884 wnd_sum +=
1885 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1886
1887 if (unlikely(wnd_sum < lso_mss)) {
1888 to_copy = 1;
1889 break;
1890 }
1891 wnd_sum -=
1892 skb_shinfo(skb)->frags[wnd_idx].size;
1893 }
1894 } else {
1895 /* in non-LSO too fragmented packet should always
1896 be linearized */
1897 to_copy = 1;
1898 }
1899 }
1900
1901exit_lbl:
1902 if (unlikely(to_copy))
1903 DP(NETIF_MSG_TX_QUEUED,
1904 "Linearization IS REQUIRED for %s packet. "
1905 "num_frags %d hlen %d first_bd_sz %d\n",
1906 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1907 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1908
1909 return to_copy;
1910}
1911#endif
1912
1913static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1914 u32 xmit_type)
1915 {
1916 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1917 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1918 ETH_TX_PARSE_BD_E2_LSO_MSS;
1919 if ((xmit_type & XMIT_GSO_V6) &&
1920 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1921 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1922}
1923
1924/**
1925 * Update PBD in GSO case.
1926 *
1927 * @param skb
1928 * @param tx_start_bd
1929 * @param pbd
1930 * @param xmit_type
1931 */
1932static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1933 struct eth_tx_parse_bd_e1x *pbd,
1934 u32 xmit_type)
1935{
1936 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1937 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1938 pbd->tcp_flags = pbd_tcp_flags(skb);
1939
1940 if (xmit_type & XMIT_GSO_V4) {
1941 pbd->ip_id = swab16(ip_hdr(skb)->id);
1942 pbd->tcp_pseudo_csum =
1943 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1944 ip_hdr(skb)->daddr,
1945 0, IPPROTO_TCP, 0));
1946
1947 } else
1948 pbd->tcp_pseudo_csum =
1949 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1950 &ipv6_hdr(skb)->daddr,
1951 0, IPPROTO_TCP, 0));
1952
1953 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1954}
1955
1956/**
1957 *
1958 * @param skb
1959 * @param tx_start_bd
1960 * @param pbd_e2
1961 * @param xmit_type
1962 *
1963 * @return header len
1964 */
1965static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1966 u32 *parsing_data, u32 xmit_type)
1967 {
1968 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1969 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1970 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 1971
1972 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1973 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1974 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
1975
1976 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1977}
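/* Mind the units above: the TCP header length is programmed in 32-bit
 * dwords, the TCP header start offset in 16-bit words, while the value
 * returned to the caller is the total header length in bytes.
 */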
1978
1979/**
 1980 * Set the E1x parse-BD checksum fields and return the header length.
 1981 *
 1982 * @param bp
 1983 * @param skb
 1984 * @param pbd
 1985 * @param xmit_type
 1986 * @return Header length
1987 */
1988static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1989 struct eth_tx_parse_bd_e1x *pbd,
1990 u32 xmit_type)
1991{
1992 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1993
1994 /* for now NS flag is not used in Linux */
1995 pbd->global_data =
1996 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1997 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1998
1999 pbd->ip_hlen_w = (skb_transport_header(skb) -
2000 skb_network_header(skb)) / 2;
2001
2002 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
2003
2004 pbd->total_hlen_w = cpu_to_le16(hlen);
2005 hlen = hlen*2;
2006
2007 if (xmit_type & XMIT_CSUM_TCP) {
2008 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2009
2010 } else {
2011 s8 fix = SKB_CS_OFF(skb); /* signed! */
2012
2013 DP(NETIF_MSG_TX_QUEUED,
2014 "hlen %d fix %d csum before fix %x\n",
2015 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2016
2017 /* HW bug: fixup the CSUM */
2018 pbd->tcp_pseudo_csum =
2019 bnx2x_csum_fix(skb_transport_header(skb),
2020 SKB_CS(skb), fix);
2021
2022 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2023 pbd->tcp_pseudo_csum);
2024 }
2025
2026 return hlen;
2027}
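/* Here, too, the PBD works in 16-bit words: hlen is accumulated as
 * words (L2 header offset / 2, IP header words, TCP header / 2), stored
 * in total_hlen_w, and only converted back to bytes for the return
 * value.  The non-TCP (UDP) branch recomputes the checksum start via
 * bnx2x_csum_fix() to work around the HW bug noted above.
 */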
 2028
2029/* called with netif_tx_lock
2030 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2031 * netif_wake_queue()
2032 */
2033netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2034{
2035 struct bnx2x *bp = netdev_priv(dev);
2036 struct bnx2x_fastpath *fp;
2037 struct netdev_queue *txq;
2038 struct sw_tx_bd *tx_buf;
2039 struct eth_tx_start_bd *tx_start_bd;
2040 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 2041	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 2042	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
 2043	u32 pbd_e2_parsing_data = 0;
2044 u16 pkt_prod, bd_prod;
2045 int nbd, fp_index;
2046 dma_addr_t mapping;
2047 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2048 int i;
2049 u8 hlen = 0;
2050 __le16 pkt_size = 0;
2051 struct ethhdr *eth;
2052 u8 mac_type = UNICAST_ADDRESS;
2053
2054#ifdef BNX2X_STOP_ON_ERROR
2055 if (unlikely(bp->panic))
2056 return NETDEV_TX_BUSY;
2057#endif
2058
2059 fp_index = skb_get_queue_mapping(skb);
2060 txq = netdev_get_tx_queue(dev, fp_index);
2061
2062 fp = &bp->fp[fp_index];
2063
2064 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2065 fp->eth_q_stats.driver_xoff++;
2066 netif_tx_stop_queue(txq);
2067 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2068 return NETDEV_TX_BUSY;
2069 }
2070
2071 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2072 "protocol(%x,%x) gso type %x xmit_type %x\n",
2073 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2074 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2075
2076 eth = (struct ethhdr *)skb->data;
2077
2078 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2079 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2080 if (is_broadcast_ether_addr(eth->h_dest))
2081 mac_type = BROADCAST_ADDRESS;
2082 else
2083 mac_type = MULTICAST_ADDRESS;
2084 }
2085
2086#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2087 /* First, check if we need to linearize the skb (due to FW
2088 restrictions). No need to check fragmentation if page size > 8K
2089 (there will be no violation to FW restrictions) */
2090 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2091 /* Statistics of linearization */
2092 bp->lin_cnt++;
2093 if (skb_linearize(skb) != 0) {
2094 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2095 "silently dropping this SKB\n");
2096 dev_kfree_skb_any(skb);
2097 return NETDEV_TX_OK;
2098 }
2099 }
2100#endif
2101
2102 /*
2103 Please read carefully. First we use one BD which we mark as start,
2104 then we have a parsing info BD (used for TSO or xsum),
2105 and only then we have the rest of the TSO BDs.
2106 (don't forget to mark the last one as last,
2107 and to unmap only AFTER you write to the BD ...)
 2108	And above all, all pbd sizes are in words - NOT DWORDS!
2109 */
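	/* The resulting BD chain, roughly:
	 *
	 *   start BD (linear data / headers)
	 *     -> parse BD (E1x or E2 flavour)
	 *     -> optional TSO split-header BD (when headlen > hlen)
	 *     -> one data BD per page fragment
	 *
	 * with nbd counting every BD the packet consumes.
	 */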
2110
2111 pkt_prod = fp->tx_pkt_prod++;
2112 bd_prod = TX_BD(fp->tx_bd_prod);
2113
2114 /* get a tx_buf and first BD */
2115 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2116 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2117
2118 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2119 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2120 mac_type);
2121
 2122	/* header nbd */
 2123	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2124
2125 /* remember the first BD of the packet */
2126 tx_buf->first_bd = fp->tx_bd_prod;
2127 tx_buf->skb = skb;
2128 tx_buf->flags = 0;
2129
2130 DP(NETIF_MSG_TX_QUEUED,
2131 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2132 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2133
 2134	if (vlan_tx_tag_present(skb)) {
2135 tx_start_bd->vlan_or_ethertype =
2136 cpu_to_le16(vlan_tx_tag_get(skb));
2137 tx_start_bd->bd_flags.as_bitfield |=
2138 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
 2139	} else
 2140		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2141
2142 /* turn on parsing and get a BD */
2143 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 2144
2145 if (xmit_type & XMIT_CSUM) {
2146 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2147
2148 if (xmit_type & XMIT_CSUM_V4)
2149 tx_start_bd->bd_flags.as_bitfield |=
2150 ETH_TX_BD_FLAGS_IP_CSUM;
2151 else
2152 tx_start_bd->bd_flags.as_bitfield |=
2153 ETH_TX_BD_FLAGS_IPV6;
 2154
2155 if (!(xmit_type & XMIT_CSUM_TCP))
2156 tx_start_bd->bd_flags.as_bitfield |=
2157 ETH_TX_BD_FLAGS_IS_UDP;
2158 }
 2159
2160 if (CHIP_IS_E2(bp)) {
2161 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2162 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2163 /* Set PBD in checksum offload case */
2164 if (xmit_type & XMIT_CSUM)
2165 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2166 &pbd_e2_parsing_data,
2167 xmit_type);
2168 } else {
2169 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2170 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2171 /* Set PBD in checksum offload case */
2172 if (xmit_type & XMIT_CSUM)
2173 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
 2174
2175 }
2176
 2177	/* Map skb linear data for DMA */
2178 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2179 skb_headlen(skb), DMA_TO_DEVICE);
2180
 2181	/* Setup the data pointer of the first BD of the packet */
2182 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2183 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2184 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2185 tx_start_bd->nbd = cpu_to_le16(nbd);
2186 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2187 pkt_size = tx_start_bd->nbytes;
2188
2189 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2190 " nbytes %d flags %x vlan %x\n",
2191 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2192 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2193 tx_start_bd->bd_flags.as_bitfield,
2194 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2195
2196 if (xmit_type & XMIT_GSO) {
2197
2198 DP(NETIF_MSG_TX_QUEUED,
2199 "TSO packet len %d hlen %d total len %d tso size %d\n",
2200 skb->len, hlen, skb_headlen(skb),
2201 skb_shinfo(skb)->gso_size);
2202
2203 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2204
2205 if (unlikely(skb_headlen(skb) > hlen))
2206 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2207 hlen, bd_prod, ++nbd);
 2208		if (CHIP_IS_E2(bp))
2209 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2210 xmit_type);
2211 else
2212 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 2213	}
2214
2215 /* Set the PBD's parsing_data field if not zero
2216 * (for the chips newer than 57711).
2217 */
2218 if (pbd_e2_parsing_data)
2219 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2220
2221 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2222
 2223	/* Handle fragmented skb */
2224 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2225 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2226
2227 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2228 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2229 if (total_pkt_bd == NULL)
2230 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2231
2232 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2233 frag->page_offset,
2234 frag->size, DMA_TO_DEVICE);
2235
2236 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2237 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2238 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2239 le16_add_cpu(&pkt_size, frag->size);
2240
2241 DP(NETIF_MSG_TX_QUEUED,
2242 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2243 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2244 le16_to_cpu(tx_data_bd->nbytes));
2245 }
2246
2247 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2248
2249 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2250
2251 /* now send a tx doorbell, counting the next BD
2252 * if the packet contains or ends with it
2253 */
2254 if (TX_BD_POFF(bd_prod) < nbd)
2255 nbd++;
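	/* TX_BD_POFF() is the producer's offset within the current ring
	 * page; when it is smaller than nbd the packet wrapped a page
	 * boundary, so the extra next-page BD it consumed is added to the
	 * doorbell count (interpretation of the check above).
	 */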
2256
2257 if (total_pkt_bd != NULL)
2258 total_pkt_bd->total_pkt_bytes = pkt_size;
2259
 2260	if (pbd_e1x)
 2261		DP(NETIF_MSG_TX_QUEUED,
 2262		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
 2263		   " tcp_flags %x xsum %x seq %u hlen %u\n",
2264 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2265 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2266 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2267 le16_to_cpu(pbd_e1x->total_hlen_w));
2268 if (pbd_e2)
2269 DP(NETIF_MSG_TX_QUEUED,
2270 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2271 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2272 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2273 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2274 pbd_e2->parsing_data);
2275 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2276
2277 /*
2278 * Make sure that the BD data is updated before updating the producer
2279 * since FW might read the BD right after the producer is updated.
2280 * This is only applicable for weak-ordered memory model archs such
 2281	 * as IA-64. The following barrier is also mandatory since the FW
 2282	 * assumes that packets always have BDs.
2283 */
2284 wmb();
2285
2286 fp->tx_db.data.prod += nbd;
2287 barrier();
 2288
 2289	DOORBELL(bp, fp->cid, fp->tx_db.raw);
2290
2291 mmiowb();
2292
2293 fp->tx_bd_prod += nbd;
2294
2295 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2296 netif_tx_stop_queue(txq);
2297
2298 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2299 * ordering of set_bit() in netif_tx_stop_queue() and read of
 2300	 * fp->tx_bd_cons */
2301 smp_mb();
2302
2303 fp->eth_q_stats.driver_xoff++;
2304 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2305 netif_tx_wake_queue(txq);
2306 }
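	/* Standard lost-wakeup avoidance: stop the queue, make the stop
	 * visible with smp_mb(), then re-check the ring; if a completion
	 * freed enough BDs in the meantime, wake the queue here instead
	 * of relying on the next TX interrupt.
	 */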
2307 fp->tx_pkt++;
2308
2309 return NETDEV_TX_OK;
2310}
 2311
2312/* called with rtnl_lock */
2313int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2314{
2315 struct sockaddr *addr = p;
2316 struct bnx2x *bp = netdev_priv(dev);
2317
2318 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2319 return -EINVAL;
2320
2321 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2322 if (netif_running(dev))
2323 bnx2x_set_eth_mac(bp, 1);
2324
2325 return 0;
2326}
2327
 2328
 2329static int bnx2x_setup_irqs(struct bnx2x *bp)
2330{
2331 int rc = 0;
2332 if (bp->flags & USING_MSIX_FLAG) {
2333 rc = bnx2x_req_msix_irqs(bp);
2334 if (rc)
2335 return rc;
2336 } else {
2337 bnx2x_ack_int(bp);
2338 rc = bnx2x_req_irq(bp);
2339 if (rc) {
2340 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2341 return rc;
2342 }
2343 if (bp->flags & USING_MSI_FLAG) {
2344 bp->dev->irq = bp->pdev->irq;
2345 netdev_info(bp->dev, "using MSI IRQ %d\n",
2346 bp->pdev->irq);
2347 }
2348 }
2349
2350 return 0;
2351}
2352
2353void bnx2x_free_mem_bp(struct bnx2x *bp)
2354{
2355 kfree(bp->fp);
2356 kfree(bp->msix_table);
2357 kfree(bp->ilt);
2358}
2359
2360int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2361{
2362 struct bnx2x_fastpath *fp;
2363 struct msix_entry *tbl;
2364 struct bnx2x_ilt *ilt;
2365
2366 /* fp array */
2367 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2368 if (!fp)
2369 goto alloc_err;
2370 bp->fp = fp;
2371
2372 /* msix table */
 2373	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2374 GFP_KERNEL);
2375 if (!tbl)
2376 goto alloc_err;
2377 bp->msix_table = tbl;
2378
2379 /* ilt */
2380 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2381 if (!ilt)
2382 goto alloc_err;
2383 bp->ilt = ilt;
2384
2385 return 0;
2386alloc_err:
2387 bnx2x_free_mem_bp(bp);
2388 return -ENOMEM;
2389
2390}
2391
2392/* called with rtnl_lock */
2393int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2394{
2395 struct bnx2x *bp = netdev_priv(dev);
2396 int rc = 0;
2397
2398 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2399 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2400 return -EAGAIN;
2401 }
2402
2403 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2404 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2405 return -EINVAL;
2406
2407 /* This does not race with packet allocation
2408 * because the actual alloc size is
2409 * only updated as part of load
2410 */
2411 dev->mtu = new_mtu;
2412
2413 if (netif_running(dev)) {
2414 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2415 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2416 }
2417
2418 return rc;
2419}
2420
2421void bnx2x_tx_timeout(struct net_device *dev)
2422{
2423 struct bnx2x *bp = netdev_priv(dev);
2424
2425#ifdef BNX2X_STOP_ON_ERROR
2426 if (!bp->panic)
2427 bnx2x_panic();
2428#endif
2429 /* This allows the netif to be shutdown gracefully before resetting */
2430 schedule_delayed_work(&bp->reset_task, 0);
2431}
2432
2433int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2434{
2435 struct net_device *dev = pci_get_drvdata(pdev);
2436 struct bnx2x *bp;
2437
2438 if (!dev) {
2439 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2440 return -ENODEV;
2441 }
2442 bp = netdev_priv(dev);
2443
2444 rtnl_lock();
2445
2446 pci_save_state(pdev);
2447
2448 if (!netif_running(dev)) {
2449 rtnl_unlock();
2450 return 0;
2451 }
2452
2453 netif_device_detach(dev);
2454
2455 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2456
2457 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2458
2459 rtnl_unlock();
2460
2461 return 0;
2462}
2463
2464int bnx2x_resume(struct pci_dev *pdev)
2465{
2466 struct net_device *dev = pci_get_drvdata(pdev);
2467 struct bnx2x *bp;
2468 int rc;
2469
2470 if (!dev) {
2471 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2472 return -ENODEV;
2473 }
2474 bp = netdev_priv(dev);
2475
2476 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2477 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2478 return -EAGAIN;
2479 }
2480
2481 rtnl_lock();
2482
2483 pci_restore_state(pdev);
2484
2485 if (!netif_running(dev)) {
2486 rtnl_unlock();
2487 return 0;
2488 }
2489
2490 bnx2x_set_power_state(bp, PCI_D0);
2491 netif_device_attach(dev);
2492
2493 /* Since the chip was reset, clear the FW sequence number */
2494 bp->fw_seq = 0;
2495 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2496
2497 rtnl_unlock();
2498
2499 return rc;
2500}