net: Fix a comment typo
[linux-2.6-block.git] drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2013 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/tcp.h>
25#include <net/ipv6.h>
26#include <net/ip6_checksum.h>
27#include <linux/prefetch.h>
28#include "bnx2x_cmn.h"
29#include "bnx2x_init.h"
30#include "bnx2x_sp.h"
31
32/**
33 * bnx2x_move_fp - move content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @from: source FP index
37 * @to: destination FP index
38 *
39 * Makes sure the contents of bp->fp[to].napi are kept
40 * intact. This is done by first copying the napi struct from
41 * the target to the source, and then mem-copying the entire
42 * source onto the target. Update txdata pointers and related
43 * content.
44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
50 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
51 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
52 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
53 int old_max_eth_txqs, new_max_eth_txqs;
54 int old_txdata_index = 0, new_txdata_index = 0;
55
56 /* Copy the NAPI object as it has been already initialized */
57 from_fp->napi = to_fp->napi;
58
59 /* Move bnx2x_fastpath contents */
60 memcpy(to_fp, from_fp, sizeof(*to_fp));
61 to_fp->index = to;
65565884 62
63 /* move sp_objs contents as well, as their indices match fp ones */
64 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
65
66 /* move fp_stats contents as well, as their indices match fp ones */
67 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
68
69 /* Update txdata pointers in fp and move txdata content accordingly:
70 * Each fp consumes 'max_cos' txdata structures, so the index should be
71 * decremented by max_cos x delta.
72 */
73
74 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
75 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
76 (bp)->max_cos;
77 if (from == FCOE_IDX(bp)) {
78 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
79 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 }
81
82 memcpy(&bp->bnx2x_txq[new_txdata_index],
83 &bp->bnx2x_txq[old_txdata_index],
84 sizeof(struct bnx2x_fp_txdata));
85 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
86}
87
88/**
89 * bnx2x_fill_fw_str - Fill buffer with FW version string.
90 *
91 * @bp: driver handle
92 * @buf: character buffer to fill with the fw name
93 * @buf_len: length of the above buffer
94 *
95 */
96void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
97{
98 if (IS_PF(bp)) {
99 u8 phy_fw_ver[PHY_FW_VER_LEN];
100
101 phy_fw_ver[0] = '\0';
102 bnx2x_get_ext_phy_fw_version(&bp->link_params,
103 phy_fw_ver, PHY_FW_VER_LEN);
104 strlcpy(buf, bp->fw_ver, buf_len);
105 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
106 "bc %d.%d.%d%s%s",
107 (bp->common.bc_ver & 0xff0000) >> 16,
108 (bp->common.bc_ver & 0xff00) >> 8,
109 (bp->common.bc_ver & 0xff),
110 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
111 } else {
6411280a 112 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
113 }
114}
115
116/**
117 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
118 *
119 * @bp: driver handle
120 * @delta: number of eth queues which were not allocated
121 */
122static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
123{
124 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
125
126 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
127 * backward along the array could cause memory to be overwritten
128 */
129 for (cos = 1; cos < bp->max_cos; cos++) {
130 for (i = 0; i < old_eth_num - delta; i++) {
131 struct bnx2x_fastpath *fp = &bp->fp[i];
132 int new_idx = cos * (old_eth_num - delta) + i;
133
134 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
135 sizeof(struct bnx2x_fp_txdata));
136 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
137 }
138 }
139}
140
141int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
142
143/* free skb in the packet ring at pos idx
144 * return idx of last bd freed
145 */
6383c0b3 146static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
147 u16 idx, unsigned int *pkts_compl,
148 unsigned int *bytes_compl)
9f6c9258 149{
6383c0b3 150 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
151 struct eth_tx_start_bd *tx_start_bd;
152 struct eth_tx_bd *tx_data_bd;
153 struct sk_buff *skb = tx_buf->skb;
154 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
155 int nbd;
156
157 /* prefetch skb end pointer to speedup dev_kfree_skb() */
158 prefetch(&skb->end);
159
51c1a580 160 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 161 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
162
163 /* unmap first bd */
6383c0b3 164 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 165 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 166 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 167
619c5cb6 168
9f6c9258
DK
169 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
170#ifdef BNX2X_STOP_ON_ERROR
171 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
172 BNX2X_ERR("BAD nbd!\n");
173 bnx2x_panic();
174 }
175#endif
176 new_cons = nbd + tx_buf->first_bd;
177
178 /* Get the next bd */
179 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
180
181 /* Skip a parse bd... */
182 --nbd;
183 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
184
185 /* ...and the TSO split header bd since they have no mapping */
186 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
187 --nbd;
188 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
189 }
190
191 /* now free frags */
192 while (nbd > 0) {
193
6383c0b3 194 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
195 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
196 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
197 if (--nbd)
198 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
199 }
200
201 /* release skb */
202 WARN_ON(!skb);
d8290ae5 203 if (likely(skb)) {
2df1a70a
TH
204 (*pkts_compl)++;
205 (*bytes_compl) += skb->len;
206 }
d8290ae5 207
40955532 208 dev_kfree_skb_any(skb);
9f6c9258
DK
209 tx_buf->first_bd = 0;
210 tx_buf->skb = NULL;
211
212 return new_cons;
213}
214
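/* bnx2x_tx_int - process Tx completions for one txdata ring: walk the
 * consumer index up to the status block value, free each completed packet,
 * update BQL accounting and wake the netdev queue if it was stopped.
 */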
6383c0b3 215int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 216{
9f6c9258 217 struct netdev_queue *txq;
6383c0b3 218 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 219 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
220
221#ifdef BNX2X_STOP_ON_ERROR
222 if (unlikely(bp->panic))
223 return -1;
224#endif
225
6383c0b3
AE
226 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
227 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
228 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
229
230 while (sw_cons != hw_cons) {
231 u16 pkt_cons;
232
233 pkt_cons = TX_BD(sw_cons);
234
51c1a580
MS
235 DP(NETIF_MSG_TX_DONE,
236 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 237 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 238
2df1a70a 239 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 240 &pkts_compl, &bytes_compl);
2df1a70a 241
9f6c9258
DK
242 sw_cons++;
243 }
244
2df1a70a
TH
245 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
246
6383c0b3
AE
247 txdata->tx_pkt_cons = sw_cons;
248 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
249
250 /* Need to make the tx_bd_cons update visible to start_xmit()
251 * before checking for netif_tx_queue_stopped(). Without the
252 * memory barrier, there is a small possibility that
253 * start_xmit() will miss it and cause the queue to be stopped
254 * forever.
619c5cb6
VZ
255 * On the other hand we need an rmb() here to ensure the proper
256 * ordering of bit testing in the following
257 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
258 */
259 smp_mb();
260
9f6c9258
DK
261 if (unlikely(netif_tx_queue_stopped(txq))) {
262 /* Taking tx_lock() is needed to prevent reenabling the queue
263 * while it's empty. This could have happened if rx_action() gets
264 * suspended in bnx2x_tx_int() after the condition before
265 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
266 *
267 * stops the queue->sees fresh tx_bd_cons->releases the queue->
268 * sends some packets consuming the whole queue again->
269 * stops the queue
270 */
271
272 __netif_tx_lock(txq, smp_processor_id());
273
274 if ((netif_tx_queue_stopped(txq)) &&
275 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 276 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
277 netif_tx_wake_queue(txq);
278
279 __netif_tx_unlock(txq);
280 }
281 return 0;
282}
283
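/* Remember the highest SGE index handed back by the FW so far */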
284static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
285 u16 idx)
286{
287 u16 last_max = fp->last_max_sge;
288
289 if (SUB_S16(idx, last_max) > 0)
290 fp->last_max_sge = idx;
291}
292
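/* Clear the SGE mask bits for the pages used by this aggregation and
 * advance rx_sge_prod over mask elements that are now fully consumed.
 */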
293static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
294 u16 sge_len,
295 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
296{
297 struct bnx2x *bp = fp->bp;
9f6c9258
DK
298 u16 last_max, last_elem, first_elem;
299 u16 delta = 0;
300 u16 i;
301
302 if (!sge_len)
303 return;
304
305 /* First mark all used pages */
306 for (i = 0; i < sge_len; i++)
619c5cb6 307 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 308 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
309
310 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 311 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
312
313 /* Here we assume that the last SGE index is the biggest */
314 prefetch((void *)(fp->sge_mask));
523224a3 315 bnx2x_update_last_max_sge(fp,
621b4d66 316 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
317
318 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
319 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
320 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
321
322 /* If ring is not full */
323 if (last_elem + 1 != first_elem)
324 last_elem++;
325
326 /* Now update the prod */
327 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
328 if (likely(fp->sge_mask[i]))
329 break;
330
619c5cb6
VZ
331 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
332 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
333 }
334
335 if (delta > 0) {
336 fp->rx_sge_prod += delta;
337 /* clear page-end entries */
338 bnx2x_clear_sge_mask_next_elems(fp);
339 }
340
341 DP(NETIF_MSG_RX_STATUS,
342 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
343 fp->last_max_sge, fp->rx_sge_prod);
344}
345
2de67439 346/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
347 * CQE (calculated by HW).
348 */
349static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
350 const struct eth_fast_path_rx_cqe *cqe,
351 bool *l4_rxhash)
e52fcb24 352{
2de67439 353 /* Get Toeplitz hash from CQE */
e52fcb24 354 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
355 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
356 enum eth_rss_hash_type htype;
357
358 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
359 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
360 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 361 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
362 }
363 *l4_rxhash = false;
e52fcb24
ED
364 return 0;
365}
366
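/* Open a new aggregation: map a spare buffer from the per-queue TPA pool
 * into the producer slot, park the consumer buffer in the pool and record
 * the parsing info from the START CQE.
 */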
9f6c9258 367static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 368 u16 cons, u16 prod,
619c5cb6 369 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
370{
371 struct bnx2x *bp = fp->bp;
372 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
373 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
374 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
375 dma_addr_t mapping;
619c5cb6
VZ
376 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
377 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 378
619c5cb6
VZ
379 /* print error if current state != stop */
380 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
381 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
382
e52fcb24 383 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 384 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 385 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
386 fp->rx_buf_size, DMA_FROM_DEVICE);
387 /*
388 * ...if it fails - move the skb from the consumer to the producer
389 * and set the current aggregation state as ERROR to drop it
390 * when TPA_STOP arrives.
391 */
392
393 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
394 /* Move the BD from the consumer to the producer */
e52fcb24 395 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
396 tpa_info->tpa_state = BNX2X_TPA_ERROR;
397 return;
398 }
9f6c9258 399
e52fcb24
ED
400 /* move empty data from pool to prod */
401 prod_rx_buf->data = first_buf->data;
619c5cb6 402 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 403 /* point prod_bd to new data */
9f6c9258
DK
404 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
405 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
406
619c5cb6
VZ
407 /* move partial skb from cons to pool (don't unmap yet) */
408 *first_buf = *cons_rx_buf;
409
410 /* mark bin state as START */
411 tpa_info->parsing_flags =
412 le16_to_cpu(cqe->pars_flags.flags);
413 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
414 tpa_info->tpa_state = BNX2X_TPA_START;
415 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
416 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 417 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
418 if (fp->mode == TPA_MODE_GRO) {
419 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 420 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
621b4d66
DK
421 tpa_info->gro_size = gro_size;
422 }
619c5cb6 423
9f6c9258
DK
424#ifdef BNX2X_STOP_ON_ERROR
425 fp->tpa_queue_used |= (1 << queue);
426#ifdef _ASM_GENERIC_INT_L64_H
427 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
428#else
429 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
430#endif
431 fp->tpa_queue_used);
432#endif
433}
434
435/* Timestamp option length allowed for TPA aggregation:
436 *
437 * nop nop kind length echo val
438 */
439#define TPA_TSTAMP_OPT_LEN 12
440/**
cbf1de72 441 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 442 *
cbf1de72 443 * @skb: packet skb
e8920674
DK
444 * @parsing_flags: parsing flags from the START CQE
445 * @len_on_bd: total length of the first packet for the
446 * aggregation.
cbf1de72 447 * @pkt_len: length of all segments
e8920674
DK
448 *
449 * Approximate value of the MSS for this aggregation, calculated from
450 * its first packet.
451 * Compute the number of aggregated segments, and gso_type.
e4e3c02a 452 */
cbf1de72 453static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
454 u16 len_on_bd, unsigned int pkt_len,
455 u16 num_of_coalesced_segs)
e4e3c02a 456{
cbf1de72 457 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 458 * other than timestamp or IPv6 extension headers.
e4e3c02a 459 */
619c5cb6
VZ
460 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
461
462 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 463 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 464 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
465 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
466 } else {
619c5cb6 467 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
468 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
469 }
e4e3c02a
VZ
470
471 /* Check if there was a TCP timestamp; if there is one, it will
472 * always be 12 bytes long: nop nop kind length echo val.
473 *
474 * Otherwise FW would close the aggregation.
475 */
476 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
477 hdrs_len += TPA_TSTAMP_OPT_LEN;
478
cbf1de72
YM
479 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
480
481 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
482 * to skb_shinfo(skb)->gso_segs
483 */
ab5777d7 484 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
485}
486
1191cb83
ED
487static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
488 struct bnx2x_fastpath *fp, u16 index)
489{
490 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
491 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
492 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
493 dma_addr_t mapping;
494
495 if (unlikely(page == NULL)) {
496 BNX2X_ERR("Can't alloc sge\n");
497 return -ENOMEM;
498 }
499
500 mapping = dma_map_page(&bp->pdev->dev, page, 0,
924d75ab 501 SGE_PAGES, DMA_FROM_DEVICE);
1191cb83
ED
502 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
503 __free_pages(page, PAGES_PER_SGE_SHIFT);
504 BNX2X_ERR("Can't map sge\n");
505 return -ENOMEM;
506 }
507
508 sw_buf->page = page;
509 dma_unmap_addr_set(sw_buf, mapping, mapping);
510
511 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
512 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
513
514 return 0;
515}
516
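/* Attach the SGE pages of a finished aggregation to the skb as page
 * fragments (full pages for LRO, gro_size chunks for GRO), allocating a
 * replacement page for each ring entry that is consumed.
 */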
9f6c9258 517static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
518 struct bnx2x_agg_info *tpa_info,
519 u16 pages,
520 struct sk_buff *skb,
619c5cb6
VZ
521 struct eth_end_agg_rx_cqe *cqe,
522 u16 cqe_idx)
9f6c9258
DK
523{
524 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
525 u32 i, frag_len, frag_size;
526 int err, j, frag_id = 0;
619c5cb6 527 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 528 u16 full_page = 0, gro_size = 0;
9f6c9258 529
619c5cb6 530 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
531
532 if (fp->mode == TPA_MODE_GRO) {
533 gro_size = tpa_info->gro_size;
534 full_page = tpa_info->full_page;
535 }
9f6c9258
DK
536
537 /* This is needed in order to enable forwarding support */
cbf1de72
YM
538 if (frag_size)
539 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
540 le16_to_cpu(cqe->pkt_len),
541 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 542
9f6c9258 543#ifdef BNX2X_STOP_ON_ERROR
924d75ab 544 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
545 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
546 pages, cqe_idx);
619c5cb6 547 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
548 bnx2x_panic();
549 return -EINVAL;
550 }
551#endif
552
553 /* Run through the SGL and compose the fragmented skb */
554 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 555 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
556
557 /* FW gives the indices of the SGE as if the ring is an array
558 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
559 if (fp->mode == TPA_MODE_GRO)
560 frag_len = min_t(u32, frag_size, (u32)full_page);
561 else /* LRO */
924d75ab 562 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 563
9f6c9258
DK
564 rx_pg = &fp->rx_page_ring[sge_idx];
565 old_rx_pg = *rx_pg;
566
567 /* If we fail to allocate a substitute page, we simply stop
568 where we are and drop the whole packet */
569 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
570 if (unlikely(err)) {
15192a8c 571 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
572 return err;
573 }
574
575 /* Unmap the page as we are going to pass it to the stack */
576 dma_unmap_page(&bp->pdev->dev,
577 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 578 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 579 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
580 if (fp->mode == TPA_MODE_LRO)
581 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
582 else { /* GRO */
583 int rem;
584 int offset = 0;
585 for (rem = frag_len; rem > 0; rem -= gro_size) {
586 int len = rem > gro_size ? gro_size : rem;
587 skb_fill_page_desc(skb, frag_id++,
588 old_rx_pg.page, offset, len);
589 if (offset)
590 get_page(old_rx_pg.page);
591 offset += len;
592 }
593 }
9f6c9258
DK
594
595 skb->data_len += frag_len;
924d75ab 596 skb->truesize += SGE_PAGES;
9f6c9258
DK
597 skb->len += frag_len;
598
599 frag_size -= frag_len;
600 }
601
602 return 0;
603}
604
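/* Rx buffers come from the page fragment allocator when rx_frag_size is
 * set; otherwise they are plain kmalloc() buffers.
 */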
605static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
606{
607 if (fp->rx_frag_size)
608 put_page(virt_to_head_page(data));
609 else
610 kfree(data);
611}
612
613static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
614{
615 if (fp->rx_frag_size)
616 return netdev_alloc_frag(fp->rx_frag_size);
617
618 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
619}
620
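/* For FW-aggregated (GRO) packets the TCP pseudo-header checksum has to be
 * seeded again before the skb is handed to tcp_gro_complete().
 */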
621#ifdef CONFIG_INET
622static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
623{
624 const struct iphdr *iph = ip_hdr(skb);
625 struct tcphdr *th;
626
627 skb_set_transport_header(skb, sizeof(struct iphdr));
628 th = tcp_hdr(skb);
629
630 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
631 iph->saddr, iph->daddr, 0);
632}
633
634static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
635{
636 struct ipv6hdr *iph = ipv6_hdr(skb);
637 struct tcphdr *th;
638
639 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
640 th = tcp_hdr(skb);
641
642 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
643 &iph->saddr, &iph->daddr, 0);
644}
645#endif
646
647static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
648 struct sk_buff *skb)
649{
650#ifdef CONFIG_INET
cbf1de72 651 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
652 skb_set_network_header(skb, 0);
653 switch (be16_to_cpu(skb->protocol)) {
654 case ETH_P_IP:
655 bnx2x_gro_ip_csum(bp, skb);
656 break;
657 case ETH_P_IPV6:
658 bnx2x_gro_ipv6_csum(bp, skb);
659 break;
660 default:
661 BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
662 be16_to_cpu(skb->protocol));
663 }
664 tcp_gro_complete(skb);
665 }
666#endif
667 napi_gro_receive(&fp->napi, skb);
668}
669
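/* Close an aggregation: allocate a replacement buffer, build an skb around
 * the aggregated data, attach the SGE fragments and pass it up through GRO.
 * On any allocation failure the packet is dropped and the buffer stays in
 * the bin.
 */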
670static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
671 struct bnx2x_agg_info *tpa_info,
672 u16 pages,
673 struct eth_end_agg_rx_cqe *cqe,
674 u16 cqe_idx)
9f6c9258 675{
619c5cb6 676 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 677 u8 pad = tpa_info->placement_offset;
619c5cb6 678 u16 len = tpa_info->len_on_bd;
e52fcb24 679 struct sk_buff *skb = NULL;
621b4d66 680 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
681 u8 old_tpa_state = tpa_info->tpa_state;
682
683 tpa_info->tpa_state = BNX2X_TPA_STOP;
684
685 /* If there was an error during the handling of the TPA_START -
686 * drop this aggregation.
687 */
688 if (old_tpa_state == BNX2X_TPA_ERROR)
689 goto drop;
690
e52fcb24 691 /* Try to allocate the new data */
d46d132c 692 new_data = bnx2x_frag_alloc(fp);
9f6c9258
DK
693 /* Unmap skb in the pool anyway, as we are going to change
694 pool entry status to BNX2X_TPA_STOP even if new skb allocation
695 fails. */
696 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 697 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 698 if (likely(new_data))
d46d132c 699 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 700
e52fcb24 701 if (likely(skb)) {
9f6c9258 702#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 703 if (pad + len > fp->rx_buf_size) {
51c1a580 704 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 705 pad, len, fp->rx_buf_size);
9f6c9258
DK
706 bnx2x_panic();
707 return;
708 }
709#endif
710
e52fcb24 711 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 712 skb_put(skb, len);
e52fcb24 713 skb->rxhash = tpa_info->rxhash;
a334b5fb 714 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
715
716 skb->protocol = eth_type_trans(skb, bp->dev);
717 skb->ip_summed = CHECKSUM_UNNECESSARY;
718
621b4d66
DK
719 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
720 skb, cqe, cqe_idx)) {
619c5cb6
VZ
721 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
722 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9969085e 723 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 724 } else {
51c1a580
MS
725 DP(NETIF_MSG_RX_STATUS,
726 "Failed to allocate new pages - dropping packet!\n");
40955532 727 dev_kfree_skb_any(skb);
9f6c9258
DK
728 }
729
730
e52fcb24
ED
731 /* put new data in bin */
732 rx_buf->data = new_data;
9f6c9258 733
619c5cb6 734 return;
9f6c9258 735 }
d46d132c 736 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
737drop:
738 /* drop the packet and keep the buffer in the bin */
739 DP(NETIF_MSG_RX_STATUS,
740 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 741 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
742}
743
1191cb83
ED
744static int bnx2x_alloc_rx_data(struct bnx2x *bp,
745 struct bnx2x_fastpath *fp, u16 index)
746{
747 u8 *data;
748 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
749 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
750 dma_addr_t mapping;
751
d46d132c 752 data = bnx2x_frag_alloc(fp);
1191cb83
ED
753 if (unlikely(data == NULL))
754 return -ENOMEM;
755
756 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
757 fp->rx_buf_size,
758 DMA_FROM_DEVICE);
759 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 760 bnx2x_frag_free(fp, data);
1191cb83
ED
761 BNX2X_ERR("Can't map rx data\n");
762 return -ENOMEM;
763 }
764
765 rx_buf->data = data;
766 dma_unmap_addr_set(rx_buf, mapping, mapping);
767
768 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
769 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
770
771 return 0;
772}
773
15192a8c
BW
774static
775void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
776 struct bnx2x_fastpath *fp,
777 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 778{
e488921f
MS
779 /* Do nothing if no L4 csum validation was done.
780 * We do not check whether IP csum was validated. For IPv4 we assume
781 * that if the card got as far as validating the L4 csum, it also
782 * validated the IP csum. IPv6 has no IP csum.
783 */
d6cb3e41 784 if (cqe->fast_path_cqe.status_flags &
e488921f 785 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
786 return;
787
e488921f 788 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
789
790 if (cqe->fast_path_cqe.type_error_flags &
791 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
792 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 793 qstats->hw_csum_err++;
d6cb3e41
ED
794 else
795 skb->ip_summed = CHECKSUM_UNNECESSARY;
796}
9f6c9258
DK
797
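/* bnx2x_rx_int - poll the RCQ for up to @budget completions, handling
 * slow-path events, TPA start/stop CQEs and regular packets, then update
 * the BD/CQE/SGE producers.
 */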
798int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
799{
800 struct bnx2x *bp = fp->bp;
801 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
802 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
803 int rx_pkt = 0;
804
805#ifdef BNX2X_STOP_ON_ERROR
806 if (unlikely(bp->panic))
807 return 0;
808#endif
809
810 /* CQ "next element" is of the size of the regular element,
811 that's why it's ok here */
812 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
813 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
814 hw_comp_cons++;
815
816 bd_cons = fp->rx_bd_cons;
817 bd_prod = fp->rx_bd_prod;
818 bd_prod_fw = bd_prod;
819 sw_comp_cons = fp->rx_comp_cons;
820 sw_comp_prod = fp->rx_comp_prod;
821
822 /* Memory barrier necessary as speculative reads of the rx
823 * buffer can be ahead of the index in the status block
824 */
825 rmb();
826
827 DP(NETIF_MSG_RX_STATUS,
828 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
829 fp->index, hw_comp_cons, sw_comp_cons);
830
831 while (sw_comp_cons != hw_comp_cons) {
832 struct sw_rx_bd *rx_buf = NULL;
833 struct sk_buff *skb;
834 union eth_rx_cqe *cqe;
619c5cb6 835 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 836 u8 cqe_fp_flags;
619c5cb6 837 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 838 u16 len, pad, queue;
e52fcb24 839 u8 *data;
a334b5fb 840 bool l4_rxhash;
9f6c9258 841
619c5cb6
VZ
842#ifdef BNX2X_STOP_ON_ERROR
843 if (unlikely(bp->panic))
844 return 0;
845#endif
846
9f6c9258
DK
847 comp_ring_cons = RCQ_BD(sw_comp_cons);
848 bd_prod = RX_BD(bd_prod);
849 bd_cons = RX_BD(bd_cons);
850
9f6c9258 851 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
852 cqe_fp = &cqe->fast_path_cqe;
853 cqe_fp_flags = cqe_fp->type_error_flags;
854 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 855
51c1a580
MS
856 DP(NETIF_MSG_RX_STATUS,
857 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
858 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
859 cqe_fp_flags, cqe_fp->status_flags,
860 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
861 le16_to_cpu(cqe_fp->vlan_tag),
862 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
863
864 /* is this a slowpath msg? */
619c5cb6 865 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
866 bnx2x_sp_event(fp, cqe);
867 goto next_cqe;
e52fcb24 868 }
621b4d66 869
e52fcb24
ED
870 rx_buf = &fp->rx_buf_ring[bd_cons];
871 data = rx_buf->data;
9f6c9258 872
e52fcb24 873 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
874 struct bnx2x_agg_info *tpa_info;
875 u16 frag_size, pages;
619c5cb6 876#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
877 /* sanity check */
878 if (fp->disable_tpa &&
879 (CQE_TYPE_START(cqe_fp_type) ||
880 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 881 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 882 CQE_TYPE(cqe_fp_type));
619c5cb6 883#endif
9f6c9258 884
e52fcb24
ED
885 if (CQE_TYPE_START(cqe_fp_type)) {
886 u16 queue = cqe_fp->queue_index;
887 DP(NETIF_MSG_RX_STATUS,
888 "calling tpa_start on queue %d\n",
889 queue);
9f6c9258 890
e52fcb24
ED
891 bnx2x_tpa_start(fp, queue,
892 bd_cons, bd_prod,
893 cqe_fp);
621b4d66 894
e52fcb24 895 goto next_rx;
e52fcb24 896
621b4d66
DK
897 }
898 queue = cqe->end_agg_cqe.queue_index;
899 tpa_info = &fp->tpa_info[queue];
900 DP(NETIF_MSG_RX_STATUS,
901 "calling tpa_stop on queue %d\n",
902 queue);
903
904 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
905 tpa_info->len_on_bd;
906
907 if (fp->mode == TPA_MODE_GRO)
908 pages = (frag_size + tpa_info->full_page - 1) /
909 tpa_info->full_page;
910 else
911 pages = SGE_PAGE_ALIGN(frag_size) >>
912 SGE_PAGE_SHIFT;
913
914 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
915 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 916#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
917 if (bp->panic)
918 return 0;
9f6c9258
DK
919#endif
920
621b4d66
DK
921 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
922 goto next_cqe;
e52fcb24
ED
923 }
924 /* non TPA */
621b4d66 925 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
926 pad = cqe_fp->placement_offset;
927 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 928 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
929 pad + RX_COPY_THRESH,
930 DMA_FROM_DEVICE);
931 pad += NET_SKB_PAD;
932 prefetch(data + pad); /* speedup eth_type_trans() */
933 /* is this an error packet? */
934 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 935 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
936 "ERROR flags %x rx packet %u\n",
937 cqe_fp_flags, sw_comp_cons);
15192a8c 938 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
939 goto reuse_rx;
940 }
9f6c9258 941
e52fcb24
ED
942 /* Since we don't have a jumbo ring
943 * copy small packets if mtu > 1500
944 */
945 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
946 (len <= RX_COPY_THRESH)) {
947 skb = netdev_alloc_skb_ip_align(bp->dev, len);
948 if (skb == NULL) {
51c1a580 949 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 950 "ERROR packet dropped because of alloc failure\n");
15192a8c 951 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
952 goto reuse_rx;
953 }
e52fcb24
ED
954 memcpy(skb->data, data + pad, len);
955 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
956 } else {
957 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 958 dma_unmap_single(&bp->pdev->dev,
e52fcb24 959 dma_unmap_addr(rx_buf, mapping),
a8c94b91 960 fp->rx_buf_size,
9f6c9258 961 DMA_FROM_DEVICE);
d46d132c 962 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 963 if (unlikely(!skb)) {
d46d132c 964 bnx2x_frag_free(fp, data);
15192a8c
BW
965 bnx2x_fp_qstats(bp, fp)->
966 rx_skb_alloc_failed++;
e52fcb24
ED
967 goto next_rx;
968 }
9f6c9258 969 skb_reserve(skb, pad);
9f6c9258 970 } else {
51c1a580
MS
971 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
972 "ERROR packet dropped because of alloc failure\n");
15192a8c 973 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 974reuse_rx:
e52fcb24 975 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
976 goto next_rx;
977 }
036d2df9 978 }
9f6c9258 979
036d2df9
DK
980 skb_put(skb, len);
981 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 982
983 /* Set Toeplitz hash for a non-LRO skb */
a334b5fb
ED
984 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
985 skb->l4_rxhash = l4_rxhash;
9f6c9258 986
036d2df9 987 skb_checksum_none_assert(skb);
f85582f8 988
d6cb3e41 989 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
990 bnx2x_csum_validate(skb, cqe, fp,
991 bnx2x_fp_qstats(bp, fp));
9f6c9258 992
f233cafe 993 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 994
619c5cb6
VZ
995 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
996 PARSING_FLAGS_VLAN)
9bcc0893 997 __vlan_hwaccel_put_tag(skb,
619c5cb6 998 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 999 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
1000
1001
1002next_rx:
e52fcb24 1003 rx_buf->data = NULL;
9f6c9258
DK
1004
1005 bd_cons = NEXT_RX_IDX(bd_cons);
1006 bd_prod = NEXT_RX_IDX(bd_prod);
1007 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1008 rx_pkt++;
1009next_cqe:
1010 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1011 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1012
1013 if (rx_pkt == budget)
1014 break;
1015 } /* while */
1016
1017 fp->rx_bd_cons = bd_cons;
1018 fp->rx_bd_prod = bd_prod_fw;
1019 fp->rx_comp_cons = sw_comp_cons;
1020 fp->rx_comp_prod = sw_comp_prod;
1021
1022 /* Update producers */
1023 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1024 fp->rx_sge_prod);
1025
1026 fp->rx_pkt += rx_pkt;
1027 fp->rx_calls++;
1028
1029 return rx_pkt;
1030}
1031
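/* MSI-X fast-path interrupt: ack the status block with the IGU interrupt
 * disabled and schedule NAPI for this fastpath.
 */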
1032static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1033{
1034 struct bnx2x_fastpath *fp = fp_cookie;
1035 struct bnx2x *bp = fp->bp;
6383c0b3 1036 u8 cos;
9f6c9258 1037
51c1a580
MS
1038 DP(NETIF_MSG_INTR,
1039 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
1040 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1041 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1042
1043#ifdef BNX2X_STOP_ON_ERROR
1044 if (unlikely(bp->panic))
1045 return IRQ_HANDLED;
1046#endif
1047
1048 /* Handle Rx and Tx according to MSI-X vector */
1049 prefetch(fp->rx_cons_sb);
6383c0b3
AE
1050
1051 for_each_cos_in_tx_queue(fp, cos)
65565884 1052 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1053
523224a3 1054 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1055 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1056
1057 return IRQ_HANDLED;
1058}
1059
9f6c9258
DK
1060/* HW Lock for shared dual port PHYs */
1061void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1062{
1063 mutex_lock(&bp->port.phy_mutex);
1064
8203c4b6 1065 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1066}
1067
1068void bnx2x_release_phy_lock(struct bnx2x *bp)
1069{
8203c4b6 1070 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1071
1072 mutex_unlock(&bp->port.phy_mutex);
1073}
1074
0793f83f
DK
1075/* calculates MF speed according to current linespeed and MF configuration */
1076u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1077{
1078 u16 line_speed = bp->link_vars.line_speed;
1079 if (IS_MF(bp)) {
faa6fcbb
DK
1080 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1081 bp->mf_config[BP_VN(bp)]);
1082
1083 /* Calculate the current MAX line speed limit for the MF
1084 * devices
0793f83f 1085 */
faa6fcbb
DK
1086 if (IS_MF_SI(bp))
1087 line_speed = (line_speed * maxCfg) / 100;
1088 else { /* SD mode */
0793f83f
DK
1089 u16 vn_max_rate = maxCfg * 100;
1090
1091 if (vn_max_rate < line_speed)
1092 line_speed = vn_max_rate;
faa6fcbb 1093 }
0793f83f
DK
1094 }
1095
1096 return line_speed;
1097}
1098
1099/**
1100 * bnx2x_fill_report_data - fill link report data to report
1101 *
1102 * @bp: driver handle
1103 * @data: link state to update
1104 *
1105 * It uses non-atomic bit operations because it is called under the mutex.
1106 */
1107static void bnx2x_fill_report_data(struct bnx2x *bp,
1108 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1109{
1110 u16 line_speed = bnx2x_get_mf_speed(bp);
1111
1112 memset(data, 0, sizeof(*data));
1113
1114 /* Fill the report data: effective line speed */
1115 data->line_speed = line_speed;
1116
1117 /* Link is down */
1118 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1119 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1120 &data->link_report_flags);
1121
1122 /* Full DUPLEX */
1123 if (bp->link_vars.duplex == DUPLEX_FULL)
1124 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1125
1126 /* Rx Flow Control is ON */
1127 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1128 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1129
1130 /* Tx Flow Control is ON */
1131 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1132 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1133}
1134
1135/**
1136 * bnx2x_link_report - report link status to OS.
1137 *
1138 * @bp: driver handle
1139 *
1140 * Calls the __bnx2x_link_report() under the same locking scheme
1141 * as a link/PHY state managing code to ensure a consistent link
1142 * reporting.
1143 */
1144
9f6c9258
DK
1145void bnx2x_link_report(struct bnx2x *bp)
1146{
2ae17f66
VZ
1147 bnx2x_acquire_phy_lock(bp);
1148 __bnx2x_link_report(bp);
1149 bnx2x_release_phy_lock(bp);
1150}
9f6c9258 1151
1152/**
1153 * __bnx2x_link_report - report link status to OS.
1154 *
1155 * @bp: driver handle
1156 *
1157 * Non-atomic implementation.
1158 * Should be called under the phy_lock.
1159 */
1160void __bnx2x_link_report(struct bnx2x *bp)
1161{
1162 struct bnx2x_link_report_data cur_data;
9f6c9258 1163
2ae17f66 1164 /* reread mf_cfg */
ad5afc89 1165 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1166 bnx2x_read_mf_cfg(bp);
1167
1168 /* Read the current link report info */
1169 bnx2x_fill_report_data(bp, &cur_data);
1170
1171 /* Don't report link down or exactly the same link status twice */
1172 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1173 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1174 &bp->last_reported_link.link_report_flags) &&
1175 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1176 &cur_data.link_report_flags)))
1177 return;
1178
1179 bp->link_cnt++;
9f6c9258 1180
1181 /* We are going to report new link parameters now -
1182 * remember the current data for the next time.
1183 */
1184 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1185
2ae17f66
VZ
1186 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1187 &cur_data.link_report_flags)) {
1188 netif_carrier_off(bp->dev);
1189 netdev_err(bp->dev, "NIC Link is Down\n");
1190 return;
1191 } else {
94f05b0f
JP
1192 const char *duplex;
1193 const char *flow;
1194
2ae17f66 1195 netif_carrier_on(bp->dev);
9f6c9258 1196
2ae17f66
VZ
1197 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1198 &cur_data.link_report_flags))
94f05b0f 1199 duplex = "full";
9f6c9258 1200 else
94f05b0f 1201 duplex = "half";
9f6c9258 1202
2ae17f66
VZ
1203 /* Handle the FC at the end so that only these flags would be
1204 * possibly set. This way we may easily check if there is no FC
1205 * enabled.
1206 */
1207 if (cur_data.link_report_flags) {
1208 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1209 &cur_data.link_report_flags)) {
2ae17f66
VZ
1210 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1211 &cur_data.link_report_flags))
94f05b0f
JP
1212 flow = "ON - receive & transmit";
1213 else
1214 flow = "ON - receive";
9f6c9258 1215 } else {
94f05b0f 1216 flow = "ON - transmit";
9f6c9258 1217 }
94f05b0f
JP
1218 } else {
1219 flow = "none";
9f6c9258 1220 }
94f05b0f
JP
1221 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1222 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1223 }
1224}
1225
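/* Wire up the "next page" elements at the end of each SGE ring page so the
 * ring wraps from one page to the next (and back to the first).
 */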
1226static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1227{
1228 int i;
1229
1230 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1231 struct eth_rx_sge *sge;
1232
1233 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1234 sge->addr_hi =
1235 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1236 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1237
1238 sge->addr_lo =
1239 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1240 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1241 }
1242}
1243
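/* Release the first_buf data of the first @last TPA bins, unmapping the
 * buffer first when the bin still holds an open aggregation.
 */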
1244static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1245 struct bnx2x_fastpath *fp, int last)
1246{
1247 int i;
1248
1249 for (i = 0; i < last; i++) {
1250 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1251 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1252 u8 *data = first_buf->data;
1253
1254 if (data == NULL) {
1255 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1256 continue;
1257 }
1258 if (tpa_info->tpa_state == BNX2X_TPA_START)
1259 dma_unmap_single(&bp->pdev->dev,
1260 dma_unmap_addr(first_buf, mapping),
1261 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1262 bnx2x_frag_free(fp, data);
1191cb83
ED
1263 first_buf->data = NULL;
1264 }
1265}
1266
55c11941
MS
1267void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1268{
1269 int j;
1270
1271 for_each_rx_queue_cnic(bp, j) {
1272 struct bnx2x_fastpath *fp = &bp->fp[j];
1273
1274 fp->rx_bd_cons = 0;
1275
1276 /* Activate BD ring */
1277 /* Warning!
1278 * this will generate an interrupt (to the TSTORM)
1279 * must only be done after chip is initialized
1280 */
1281 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1282 fp->rx_sge_prod);
1283 }
1284}
1285
9f6c9258
DK
1286void bnx2x_init_rx_rings(struct bnx2x *bp)
1287{
1288 int func = BP_FUNC(bp);
523224a3 1289 u16 ring_prod;
9f6c9258 1290 int i, j;
25141580 1291
b3b83c3f 1292 /* Allocate TPA resources */
55c11941 1293 for_each_eth_queue(bp, j) {
523224a3 1294 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1295
a8c94b91
VZ
1296 DP(NETIF_MSG_IFUP,
1297 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1298
523224a3 1299 if (!fp->disable_tpa) {
1300 /* Fill the per-aggregation pool */
dfacf138 1301 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1302 struct bnx2x_agg_info *tpa_info =
1303 &fp->tpa_info[i];
1304 struct sw_rx_bd *first_buf =
1305 &tpa_info->first_buf;
1306
d46d132c 1307 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1308 if (!first_buf->data) {
51c1a580
MS
1309 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1310 j);
9f6c9258
DK
1311 bnx2x_free_tpa_pool(bp, fp, i);
1312 fp->disable_tpa = 1;
1313 break;
1314 }
619c5cb6
VZ
1315 dma_unmap_addr_set(first_buf, mapping, 0);
1316 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1317 }
523224a3
DK
1318
1319 /* "next page" elements initialization */
1320 bnx2x_set_next_page_sgl(fp);
1321
1322 /* set SGEs bit mask */
1323 bnx2x_init_sge_ring_bit_mask(fp);
1324
1325 /* Allocate SGEs and initialize the ring elements */
1326 for (i = 0, ring_prod = 0;
1327 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1328
1329 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1330 BNX2X_ERR("was only able to allocate %d rx sges\n",
1331 i);
1332 BNX2X_ERR("disabling TPA for queue[%d]\n",
1333 j);
523224a3 1334 /* Cleanup already allocated elements */
619c5cb6
VZ
1335 bnx2x_free_rx_sge_range(bp, fp,
1336 ring_prod);
1337 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1338 MAX_AGG_QS(bp));
523224a3
DK
1339 fp->disable_tpa = 1;
1340 ring_prod = 0;
1341 break;
1342 }
1343 ring_prod = NEXT_SGE_IDX(ring_prod);
1344 }
1345
1346 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1347 }
1348 }
1349
55c11941 1350 for_each_eth_queue(bp, j) {
9f6c9258
DK
1351 struct bnx2x_fastpath *fp = &bp->fp[j];
1352
1353 fp->rx_bd_cons = 0;
9f6c9258 1354
b3b83c3f
DK
1355 /* Activate BD ring */
1356 /* Warning!
1357 * this will generate an interrupt (to the TSTORM)
1358 * must only be done after chip is initialized
1359 */
1360 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1361 fp->rx_sge_prod);
9f6c9258 1362
9f6c9258
DK
1363 if (j != 0)
1364 continue;
1365
619c5cb6 1366 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1367 REG_WR(bp, BAR_USTRORM_INTMEM +
1368 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1369 U64_LO(fp->rx_comp_mapping));
1370 REG_WR(bp, BAR_USTRORM_INTMEM +
1371 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1372 U64_HI(fp->rx_comp_mapping));
1373 }
9f6c9258
DK
1374 }
1375}
f85582f8 1376
55c11941 1377static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1378{
6383c0b3 1379 u8 cos;
55c11941 1380 struct bnx2x *bp = fp->bp;
9f6c9258 1381
55c11941
MS
1382 for_each_cos_in_tx_queue(fp, cos) {
1383 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1384 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1385
55c11941
MS
1386 u16 sw_prod = txdata->tx_pkt_prod;
1387 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1388
55c11941
MS
1389 while (sw_cons != sw_prod) {
1390 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1391 &pkts_compl, &bytes_compl);
1392 sw_cons++;
9f6c9258 1393 }
55c11941
MS
1394
1395 netdev_tx_reset_queue(
1396 netdev_get_tx_queue(bp->dev,
1397 txdata->txq_index));
1398 }
1399}
1400
1401static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1402{
1403 int i;
1404
1405 for_each_tx_queue_cnic(bp, i) {
1406 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1407 }
1408}
1409
1410static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1411{
1412 int i;
1413
1414 for_each_eth_queue(bp, i) {
1415 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1416 }
1417}
1418
b3b83c3f
DK
1419static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1420{
1421 struct bnx2x *bp = fp->bp;
1422 int i;
1423
1424 /* ring wasn't allocated */
1425 if (fp->rx_buf_ring == NULL)
1426 return;
1427
1428 for (i = 0; i < NUM_RX_BD; i++) {
1429 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1430 u8 *data = rx_buf->data;
b3b83c3f 1431
e52fcb24 1432 if (data == NULL)
b3b83c3f 1433 continue;
b3b83c3f
DK
1434 dma_unmap_single(&bp->pdev->dev,
1435 dma_unmap_addr(rx_buf, mapping),
1436 fp->rx_buf_size, DMA_FROM_DEVICE);
1437
e52fcb24 1438 rx_buf->data = NULL;
d46d132c 1439 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1440 }
1441}
1442
55c11941
MS
1443static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1444{
1445 int j;
1446
1447 for_each_rx_queue_cnic(bp, j) {
1448 bnx2x_free_rx_bds(&bp->fp[j]);
1449 }
1450}
1451
9f6c9258
DK
1452static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1453{
b3b83c3f 1454 int j;
9f6c9258 1455
55c11941 1456 for_each_eth_queue(bp, j) {
9f6c9258
DK
1457 struct bnx2x_fastpath *fp = &bp->fp[j];
1458
b3b83c3f 1459 bnx2x_free_rx_bds(fp);
9f6c9258 1460
9f6c9258 1461 if (!fp->disable_tpa)
dfacf138 1462 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1463 }
1464}
1465
55c11941
MS
1466void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1467{
1468 bnx2x_free_tx_skbs_cnic(bp);
1469 bnx2x_free_rx_skbs_cnic(bp);
1470}
1471
9f6c9258
DK
1472void bnx2x_free_skbs(struct bnx2x *bp)
1473{
1474 bnx2x_free_tx_skbs(bp);
1475 bnx2x_free_rx_skbs(bp);
1476}
1477
e3835b99
DK
1478void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1479{
1480 /* load old values */
1481 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1482
1483 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1484 /* leave all but MAX value */
1485 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1486
1487 /* set new MAX value */
1488 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1489 & FUNC_MF_CFG_MAX_BW_MASK;
1490
1491 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1492 }
1493}
1494
1495/**
1496 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1497 *
1498 * @bp: driver handle
1499 * @nvecs: number of vectors to be released
1500 */
1501static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1502{
ca92429f 1503 int i, offset = 0;
9f6c9258 1504
ca92429f
DK
1505 if (nvecs == offset)
1506 return;
ad5afc89
AE
1507
1508 /* VFs don't have a default SB */
1509 if (IS_PF(bp)) {
1510 free_irq(bp->msix_table[offset].vector, bp->dev);
1511 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1512 bp->msix_table[offset].vector);
1513 offset++;
1514 }
55c11941
MS
1515
1516 if (CNIC_SUPPORT(bp)) {
1517 if (nvecs == offset)
1518 return;
1519 offset++;
1520 }
ca92429f 1521
ec6ba945 1522 for_each_eth_queue(bp, i) {
ca92429f
DK
1523 if (nvecs == offset)
1524 return;
51c1a580
MS
1525 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1526 i, bp->msix_table[offset].vector);
9f6c9258 1527
ca92429f 1528 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1529 }
1530}
1531
d6214d7a 1532void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1533{
30a5de77 1534 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1535 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1536 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1537
1538 /* vfs don't have a default status block */
1539 if (IS_PF(bp))
1540 nvecs++;
1541
1542 bnx2x_free_msix_irqs(bp, nvecs);
1543 } else {
30a5de77 1544 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1545 }
9f6c9258
DK
1546}
1547
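/* Request MSI-X vectors: one for the default status block (PF only), one
 * for CNIC if supported and one per ETH queue.  On partial success shrink
 * the queue count or fall back to a single vector.
 */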
0e8d2ec5 1548int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1549{
1ab4434c 1550 int msix_vec = 0, i, rc;
9f6c9258 1551
1ab4434c
AE
1552 /* VFs don't have a default status block */
1553 if (IS_PF(bp)) {
1554 bp->msix_table[msix_vec].entry = msix_vec;
1555 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1556 bp->msix_table[0].entry);
1557 msix_vec++;
1558 }
9f6c9258 1559
55c11941
MS
1560 /* Cnic requires an msix vector for itself */
1561 if (CNIC_SUPPORT(bp)) {
1562 bp->msix_table[msix_vec].entry = msix_vec;
1563 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1564 msix_vec, bp->msix_table[msix_vec].entry);
1565 msix_vec++;
1566 }
1567
6383c0b3 1568 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1569 for_each_eth_queue(bp, i) {
d6214d7a 1570 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1571 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1572 msix_vec, msix_vec, i);
d6214d7a 1573 msix_vec++;
9f6c9258
DK
1574 }
1575
1ab4434c
AE
1576 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1577 msix_vec);
d6214d7a 1578
1ab4434c 1579 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1580
1581 /*
1582 * reconfigure number of tx/rx queues according to available
1583 * MSI-X vectors
1584 */
55c11941 1585 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1586 /* how less vectors we will have? */
1ab4434c 1587 int diff = msix_vec - rc;
9f6c9258 1588
51c1a580 1589 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1590
1591 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1592
1593 if (rc) {
30a5de77
DK
1594 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1595 goto no_msix;
9f6c9258 1596 }
d6214d7a
DK
1597 /*
1598 * decrease number of queues by number of unallocated entries
1599 */
55c11941
MS
1600 bp->num_ethernet_queues -= diff;
1601 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1602
51c1a580 1603 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1604 bp->num_queues);
1605 } else if (rc > 0) {
1606 /* Get by with single vector */
1607 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1608 if (rc) {
1609 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1610 rc);
1611 goto no_msix;
1612 }
1613
1614 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1615 bp->flags |= USING_SINGLE_MSIX_FLAG;
1616
55c11941
MS
1617 BNX2X_DEV_INFO("set number of queues to 1\n");
1618 bp->num_ethernet_queues = 1;
1619 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1620 } else if (rc < 0) {
51c1a580 1621 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1622 goto no_msix;
9f6c9258
DK
1623 }
1624
1625 bp->flags |= USING_MSIX_FLAG;
1626
1627 return 0;
30a5de77
DK
1628
1629no_msix:
1630 /* fall to INTx if not enough memory */
1631 if (rc == -ENOMEM)
1632 bp->flags |= DISABLE_MSI_FLAG;
1633
1634 return rc;
9f6c9258
DK
1635}
1636
1637static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1638{
ca92429f 1639 int i, rc, offset = 0;
9f6c9258 1640
ad5afc89
AE
1641 /* no default status block for vf */
1642 if (IS_PF(bp)) {
1643 rc = request_irq(bp->msix_table[offset++].vector,
1644 bnx2x_msix_sp_int, 0,
1645 bp->dev->name, bp->dev);
1646 if (rc) {
1647 BNX2X_ERR("request sp irq failed\n");
1648 return -EBUSY;
1649 }
9f6c9258
DK
1650 }
1651
55c11941
MS
1652 if (CNIC_SUPPORT(bp))
1653 offset++;
1654
ec6ba945 1655 for_each_eth_queue(bp, i) {
9f6c9258
DK
1656 struct bnx2x_fastpath *fp = &bp->fp[i];
1657 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1658 bp->dev->name, i);
1659
d6214d7a 1660 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1661 bnx2x_msix_fp_int, 0, fp->name, fp);
1662 if (rc) {
ca92429f
DK
1663 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1664 bp->msix_table[offset].vector, rc);
1665 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1666 return -EBUSY;
1667 }
1668
d6214d7a 1669 offset++;
9f6c9258
DK
1670 }
1671
ec6ba945 1672 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1673 if (IS_PF(bp)) {
1674 offset = 1 + CNIC_SUPPORT(bp);
1675 netdev_info(bp->dev,
1676 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1677 bp->msix_table[0].vector,
1678 0, bp->msix_table[offset].vector,
1679 i - 1, bp->msix_table[offset + i - 1].vector);
1680 } else {
1681 offset = CNIC_SUPPORT(bp);
1682 netdev_info(bp->dev,
1683 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1684 0, bp->msix_table[offset].vector,
1685 i - 1, bp->msix_table[offset + i - 1].vector);
1686 }
9f6c9258
DK
1687 return 0;
1688}
1689
d6214d7a 1690int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1691{
1692 int rc;
1693
1694 rc = pci_enable_msi(bp->pdev);
1695 if (rc) {
51c1a580 1696 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1697 return -1;
1698 }
1699 bp->flags |= USING_MSI_FLAG;
1700
1701 return 0;
1702}
1703
1704static int bnx2x_req_irq(struct bnx2x *bp)
1705{
1706 unsigned long flags;
30a5de77 1707 unsigned int irq;
9f6c9258 1708
30a5de77 1709 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1710 flags = 0;
1711 else
1712 flags = IRQF_SHARED;
1713
30a5de77
DK
1714 if (bp->flags & USING_MSIX_FLAG)
1715 irq = bp->msix_table[0].vector;
1716 else
1717 irq = bp->pdev->irq;
1718
1719 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1720}
1721
1191cb83 1722static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1723{
1724 int rc = 0;
30a5de77
DK
1725 if (bp->flags & USING_MSIX_FLAG &&
1726 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1727 rc = bnx2x_req_msix_irqs(bp);
1728 if (rc)
1729 return rc;
1730 } else {
619c5cb6
VZ
1731 rc = bnx2x_req_irq(bp);
1732 if (rc) {
1733 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1734 return rc;
1735 }
1736 if (bp->flags & USING_MSI_FLAG) {
1737 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1738 netdev_info(bp->dev, "using MSI IRQ %d\n",
1739 bp->dev->irq);
1740 }
1741 if (bp->flags & USING_MSIX_FLAG) {
1742 bp->dev->irq = bp->msix_table[0].vector;
1743 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1744 bp->dev->irq);
619c5cb6
VZ
1745 }
1746 }
1747
1748 return 0;
1749}
1750
55c11941
MS
1751static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1752{
1753 int i;
1754
1755 for_each_rx_queue_cnic(bp, i)
1756 napi_enable(&bnx2x_fp(bp, i, napi));
1757}
1758
1191cb83 1759static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1760{
1761 int i;
1762
55c11941 1763 for_each_eth_queue(bp, i)
9f6c9258
DK
1764 napi_enable(&bnx2x_fp(bp, i, napi));
1765}
1766
55c11941
MS
1767static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1768{
1769 int i;
1770
1771 for_each_rx_queue_cnic(bp, i)
1772 napi_disable(&bnx2x_fp(bp, i, napi));
1773}
1774
1191cb83 1775static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1776{
1777 int i;
1778
55c11941 1779 for_each_eth_queue(bp, i)
9f6c9258
DK
1780 napi_disable(&bnx2x_fp(bp, i, napi));
1781}
1782
1783void bnx2x_netif_start(struct bnx2x *bp)
1784{
4b7ed897
DK
1785 if (netif_running(bp->dev)) {
1786 bnx2x_napi_enable(bp);
55c11941
MS
1787 if (CNIC_LOADED(bp))
1788 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1789 bnx2x_int_enable(bp);
1790 if (bp->state == BNX2X_STATE_OPEN)
1791 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1792 }
1793}
1794
1795void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1796{
1797 bnx2x_int_disable_sync(bp, disable_hw);
1798 bnx2x_napi_disable(bp);
55c11941
MS
1799 if (CNIC_LOADED(bp))
1800 bnx2x_napi_disable_cnic(bp);
9f6c9258 1801}
9f6c9258 1802
8307fa3e
VZ
1803u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1804{
8307fa3e 1805 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1806
55c11941 1807 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1808 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1809 u16 ether_type = ntohs(hdr->h_proto);
1810
1811 /* Skip VLAN tag if present */
1812 if (ether_type == ETH_P_8021Q) {
1813 struct vlan_ethhdr *vhdr =
1814 (struct vlan_ethhdr *)skb->data;
1815
1816 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1817 }
1818
1819 /* If ethertype is FCoE or FIP - use FCoE ring */
1820 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1821 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1822 }
55c11941 1823
cdb9d6ae 1824 /* select a non-FCoE queue */
6383c0b3 1825 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1826}
1827
d6214d7a
DK
1828void bnx2x_set_num_queues(struct bnx2x *bp)
1829{
96305234 1830 /* RSS queues */
55c11941 1831 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1832
a3348722
BW
1833 /* override in STORAGE SD modes */
1834 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1835 bp->num_ethernet_queues = 1;
1836
ec6ba945 1837 /* Add special queues */
55c11941
MS
1838 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1839 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1840
1841 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1842}
1843
cdb9d6ae
VZ
1844/**
1845 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1846 *
1847 * @bp: Driver handle
1848 *
1849 * We currently support at most 16 Tx queues for each CoS, thus we will
1850 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1851 * bp->max_cos.
1852 *
1853 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1854 * index after all ETH L2 indices.
1855 *
1856 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1857 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1858 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1859 *
1860 * The proper configuration of skb->queue_mapping is handled by
1861 * bnx2x_select_queue() and __skb_tx_hash().
1862 *
1863 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1864 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1865 */
55c11941 1866static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1867{
6383c0b3 1868 int rc, tx, rx;
ec6ba945 1869
65565884 1870 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1871 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1872
6383c0b3 1873/* account for fcoe queue */
55c11941
MS
1874 if (include_cnic && !NO_FCOE(bp)) {
1875 rx++;
1876 tx++;
6383c0b3 1877 }
6383c0b3
AE
1878
1879 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1880 if (rc) {
1881 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1882 return rc;
1883 }
1884 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1885 if (rc) {
1886 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1887 return rc;
1888 }
1889
51c1a580 1890 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1891 tx, rx);
1892
ec6ba945
VZ
1893 return rc;
1894}
1895
1191cb83 1896static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1897{
1898 int i;
1899
1900 for_each_queue(bp, i) {
1901 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1902 u32 mtu;
a8c94b91
VZ
1903
1904 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1905 if (IS_FCOE_IDX(i))
1906 /*
1907 * Although there are no IP frames expected to arrive on
1908 * this ring we still want to add an
1909 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1910 * overrun attack.
1911 */
e52fcb24 1912 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1913 else
e52fcb24
ED
1914 mtu = bp->dev->mtu;
1915 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1916 IP_HEADER_ALIGNMENT_PADDING +
1917 ETH_OVREHEAD +
1918 mtu +
1919 BNX2X_FW_RX_ALIGN_END;
1920 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1921 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1922 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1923 else
1924 fp->rx_frag_size = 0;
a8c94b91
VZ
1925 }
1926}
1927
1191cb83 1928static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1929{
1930 int i;
619c5cb6
VZ
1931 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1932
96305234 1933 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1934 * enabled
1935 */
5d317c6a
MS
1936 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1937 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1938 bp->fp->cl_id +
1939 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1940
1941 /*
1942 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1943 * per-port, so if explicit configuration is needed, do it only
1944 * for a PMF.
1945 *
1946 * For 57712 and newer on the other hand it's a per-function
1947 * configuration.
1948 */
5d317c6a 1949 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1950}
1951
96305234 1952int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1953 bool config_hash)
619c5cb6 1954{
3b603066 1955 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1956
1957 /* Although RSS is meaningless when there is a single HW queue we
1958 * still need it enabled in order to have HW Rx hash generated.
1959 *
1960 * if (!is_eth_multi(bp))
1961 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1962 */
1963
96305234 1964 params.rss_obj = rss_obj;
619c5cb6
VZ
1965
1966 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1967
96305234 1968 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1969
96305234
DK
1970 /* RSS configuration */
1971 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1972 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1973 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1974 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1975 if (rss_obj->udp_rss_v4)
1976 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1977 if (rss_obj->udp_rss_v6)
1978 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1979
96305234
DK
1980 /* Hash bits */
1981 params.rss_result_mask = MULTI_MASK;
619c5cb6 1982
5d317c6a 1983 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1984
96305234
DK
1985 if (config_hash) {
1986 /* RSS keys */
8376d0bc 1987 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1988 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1989 }
1990
1991 return bnx2x_config_rss(bp, &params);
1992}
1993
1191cb83 1994static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1995{
3b603066 1996 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1997
1998 /* Prepare parameters for function state transitions */
1999 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2000
2001 func_params.f_obj = &bp->func_obj;
2002 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2003
2004 func_params.params.hw_init.load_phase = load_code;
2005
2006 return bnx2x_func_state_change(bp, &func_params);
2007}
2008
2009 /*
2010 * Cleans the objects that have internal lists without sending
2011 * ramrods. Should be run when interrupts are disabled.
2012 */
2013static void bnx2x_squeeze_objects(struct bnx2x *bp)
2014{
2015 int rc;
2016 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2017 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2018 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2019
2020 /***************** Cleanup MACs' object first *************************/
2021
2022 /* Wait for completion of requested */
2023 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2024 /* Perform a dry cleanup */
2025 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2026
2027 /* Clean ETH primary MAC */
2028 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2029 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2030 &ramrod_flags);
2031 if (rc != 0)
2032 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2033
2034 /* Cleanup UC list */
2035 vlan_mac_flags = 0;
2036 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2037 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2038 &ramrod_flags);
2039 if (rc != 0)
2040 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2041
2042 /***************** Now clean mcast object *****************************/
2043 rparam.mcast_obj = &bp->mcast_obj;
2044 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2045
2046 /* Add a DEL command... */
2047 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2048 if (rc < 0)
51c1a580
MS
2049 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2050 rc);
619c5cb6
VZ
2051
2052 /* ...and wait until all pending commands are cleared */
2053 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2054 while (rc != 0) {
2055 if (rc < 0) {
2056 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2057 rc);
2058 return;
2059 }
2060
2061 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2062 }
2063}
2064
2065#ifndef BNX2X_STOP_ON_ERROR
2066#define LOAD_ERROR_EXIT(bp, label) \
2067 do { \
2068 (bp)->state = BNX2X_STATE_ERROR; \
2069 goto label; \
2070 } while (0)
55c11941
MS
2071
2072#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2073 do { \
2074 bp->cnic_loaded = false; \
2075 goto label; \
2076 } while (0)
2077#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2078#define LOAD_ERROR_EXIT(bp, label) \
2079 do { \
2080 (bp)->state = BNX2X_STATE_ERROR; \
2081 (bp)->panic = 1; \
2082 return -EBUSY; \
2083 } while (0)
55c11941
MS
2084#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2085 do { \
2086 bp->cnic_loaded = false; \
2087 (bp)->panic = 1; \
2088 return -EBUSY; \
2089 } while (0)
2090#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2091
ad5afc89
AE
2092static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2093{
2094 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2095 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2096 return;
2097}
2098
2099static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2100{
8db573ba 2101 int num_groups, vf_headroom = 0;
ad5afc89 2102 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2103
ad5afc89
AE
2104 /* number of queues for statistics is number of eth queues + FCoE */
2105 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2106
ad5afc89
AE
2107 /* Total number of FW statistics requests =
2108 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2109 * and fcoe l2 queue) stats + num of queues (which includes another 1
2110 * for fcoe l2 queue if applicable)
2111 */
2112 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2113
8db573ba
AE
2114 /* vf stats appear in the request list, but their data is allocated by
2115 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2116 * it is used to determine where to place the vf stats queries in the
2117 * request struct
2118 */
2119 if (IS_SRIOV(bp))
6411280a 2120 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2121
ad5afc89
AE
2122 /* Request is built from stats_query_header and an array of
2123 * stats_query_cmd_group each of which contains
2124 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2125 * configured in the stats_query_header.
2126 */
2127 num_groups =
8db573ba
AE
2128 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2129 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2130 1 : 0));
2131
8db573ba
AE
2132 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2133 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2134 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2135 num_groups * sizeof(struct stats_query_cmd_group);
2136
2137 /* Data for statistics requests + stats_counter
2138 * stats_counter holds per-STORM counters that are incremented
2139 * when STORM has finished with the current request.
2140 * memory for FCoE offloaded statistics is counted anyway,
2141 * even if they will not be sent.
2142 * VF stats are not accounted for here as the data of VF stats is stored
2143 * in memory allocated by the VF, not here.
2144 */
2145 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2146 sizeof(struct per_pf_stats) +
2147 sizeof(struct fcoe_statistics_params) +
2148 sizeof(struct per_queue_stats) * num_queue_stats +
2149 sizeof(struct stats_counter);
2150
2151 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2152 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2153
2154 /* Set shortcuts */
2155 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2156 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2157 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2158 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2159 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2160 bp->fw_stats_req_sz;
2161
2162 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
2163 U64_HI(bp->fw_stats_req_mapping),
2164 U64_LO(bp->fw_stats_req_mapping));
2165 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
2166 U64_HI(bp->fw_stats_data_mapping),
2167 U64_LO(bp->fw_stats_data_mapping));
2168 return 0;
2169
2170alloc_mem_err:
2171 bnx2x_free_fw_stats_mem(bp);
2172 BNX2X_ERR("Can't allocate FW stats memory\n");
2173 return -ENOMEM;
2174}
2175
2176/* send load request to mcp and analyze response */
2177static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2178{
2179 /* init fw_seq */
2180 bp->fw_seq =
2181 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2182 DRV_MSG_SEQ_NUMBER_MASK);
2183 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2184
2185 /* Get current FW pulse sequence */
2186 bp->fw_drv_pulse_wr_seq =
2187 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2188 DRV_PULSE_SEQ_MASK);
2189 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2190
2191 /* load request */
2192 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2193 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2194
2195 /* if mcp fails to respond we must abort */
2196 if (!(*load_code)) {
2197 BNX2X_ERR("MCP response failure, aborting\n");
2198 return -EBUSY;
2199 }
2200
2201 /* If mcp refused (e.g. other port is in diagnostic mode) we
2202 * must abort
2203 */
2204 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2205 BNX2X_ERR("MCP refused load request, aborting\n");
2206 return -EBUSY;
2207 }
2208 return 0;
2209}
2210
2211/* check whether another PF has already loaded FW to chip. In
2212 * virtualized environments a pf from another VM may have already
2213 * initialized the device including loading FW
2214 */
2215int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2216{
2217 /* is another pf loaded on this engine? */
2218 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2219 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2220 /* build my FW version dword */
2221 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2222 (BCM_5710_FW_MINOR_VERSION << 8) +
2223 (BCM_5710_FW_REVISION_VERSION << 16) +
2224 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2225
2226 /* read loaded FW from chip */
2227 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2228
2229 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2230 loaded_fw, my_fw);
2231
2232 /* abort nic load if version mismatch */
2233 if (my_fw != loaded_fw) {
2234 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2235 loaded_fw, my_fw);
ad5afc89
AE
2236 return -EBUSY;
2237 }
2238 }
2239 return 0;
2240}
2241
2242/* returns the "mcp load_code" according to global load_count array */
2243static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2244{
2245 int path = BP_PATH(bp);
2246
2247 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2248 path, load_count[path][0], load_count[path][1],
2249 load_count[path][2]);
2250 load_count[path][0]++;
2251 load_count[path][1 + port]++;
2252 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2253 path, load_count[path][0], load_count[path][1],
2254 load_count[path][2]);
2255 if (load_count[path][0] == 1)
2256 return FW_MSG_CODE_DRV_LOAD_COMMON;
2257 else if (load_count[path][1 + port] == 1)
2258 return FW_MSG_CODE_DRV_LOAD_PORT;
2259 else
2260 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2261}
2262
2263/* mark PMF if applicable */
2264static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2265{
2266 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2267 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2268 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2269 bp->port.pmf = 1;
2270 /* We need the barrier to ensure the ordering between the
2271 * writing to bp->port.pmf here and reading it from the
2272 * bnx2x_periodic_task().
2273 */
2274 smp_mb();
2275 } else {
2276 bp->port.pmf = 0;
452427b0
YM
2277 }
2278
ad5afc89
AE
2279 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2280}
2281
2282static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2283{
2284 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2285 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2286 (bp->common.shmem2_base)) {
2287 if (SHMEM2_HAS(bp, dcc_support))
2288 SHMEM2_WR(bp, dcc_support,
2289 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2290 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2291 if (SHMEM2_HAS(bp, afex_driver_support))
2292 SHMEM2_WR(bp, afex_driver_support,
2293 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2294 }
2295
2296 /* Set AFEX default VLAN tag to an invalid value */
2297 bp->afex_def_vlan_tag = -1;
452427b0
YM
2298}
2299
1191cb83
ED
2300/**
2301 * bnx2x_bz_fp - zero content of the fastpath structure.
2302 *
2303 * @bp: driver handle
2304 * @index: fastpath index to be zeroed
2305 *
2306 * Makes sure the contents of the bp->fp[index].napi is kept
2307 * intact.
2308 */
2309static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2310{
2311 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c 2312
65565884 2313 int cos;
1191cb83 2314 struct napi_struct orig_napi = fp->napi;
15192a8c 2315 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2316 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2317 if (fp->tpa_info)
2318 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2319 sizeof(struct bnx2x_agg_info));
2320 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2321
2322 /* Restore the NAPI object as it has been already initialized */
2323 fp->napi = orig_napi;
15192a8c 2324 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2325 fp->bp = bp;
2326 fp->index = index;
2327 if (IS_ETH_FP(fp))
2328 fp->max_cos = bp->max_cos;
2329 else
2330 /* Special queues support only one CoS */
2331 fp->max_cos = 1;
2332
65565884 2333 /* Init txdata pointers */
65565884
MS
2334 if (IS_FCOE_FP(fp))
2335 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2336 if (IS_ETH_FP(fp))
2337 for_each_cos_in_tx_queue(fp, cos)
2338 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2339 BNX2X_NUM_ETH_QUEUES(bp) + index];
2340
1191cb83
ED
2341 /*
2342 * set the tpa flag for each queue. The tpa flag determines the queue
2343 * minimal size so it must be set prior to queue memory allocation
2344 */
2345 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2346 (bp->flags & GRO_ENABLE_FLAG &&
2347 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2348 if (bp->flags & TPA_ENABLE_FLAG)
2349 fp->mode = TPA_MODE_LRO;
2350 else if (bp->flags & GRO_ENABLE_FLAG)
2351 fp->mode = TPA_MODE_GRO;
2352
1191cb83
ED
2353 /* We don't want TPA on an FCoE L2 ring */
2354 if (IS_FCOE_FP(fp))
2355 fp->disable_tpa = 1;
55c11941
MS
2356}
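/*
 * Illustrative example (hypothetical values): with 8 ETH queues and
 * max_cos = 3, fastpath 2 ends up with txdata_ptr[0] = &bp->bnx2x_txq[2],
 * txdata_ptr[1] = &bp->bnx2x_txq[10] and txdata_ptr[2] = &bp->bnx2x_txq[18],
 * i.e. bnx2x_txq is laid out as per-CoS blocks of BNX2X_NUM_ETH_QUEUES
 * entries, with the FCoE ring's single txdata placed at FCOE_TXQ_IDX(bp)
 * after the ETH blocks.
 */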
2357
2358int bnx2x_load_cnic(struct bnx2x *bp)
2359{
2360 int i, rc, port = BP_PORT(bp);
2361
2362 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2363
2364 mutex_init(&bp->cnic_mutex);
2365
ad5afc89
AE
2366 if (IS_PF(bp)) {
2367 rc = bnx2x_alloc_mem_cnic(bp);
2368 if (rc) {
2369 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2370 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2371 }
55c11941
MS
2372 }
2373
2374 rc = bnx2x_alloc_fp_mem_cnic(bp);
2375 if (rc) {
2376 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2377 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2378 }
2379
2380 /* Update the number of queues with the cnic queues */
2381 rc = bnx2x_set_real_num_queues(bp, 1);
2382 if (rc) {
2383 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2384 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2385 }
2386
2387 /* Add all CNIC NAPI objects */
2388 bnx2x_add_all_napi_cnic(bp);
2389 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2390 bnx2x_napi_enable_cnic(bp);
2391
2392 rc = bnx2x_init_hw_func_cnic(bp);
2393 if (rc)
2394 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2395
2396 bnx2x_nic_init_cnic(bp);
2397
ad5afc89
AE
2398 if (IS_PF(bp)) {
2399 /* Enable Timer scan */
2400 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2401
2402 /* setup cnic queues */
2403 for_each_cnic_queue(bp, i) {
2404 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2405 if (rc) {
2406 BNX2X_ERR("Queue setup failed\n");
2407 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2408 }
55c11941
MS
2409 }
2410 }
2411
2412 /* Initialize Rx filter. */
2413 netif_addr_lock_bh(bp->dev);
2414 bnx2x_set_rx_mode(bp->dev);
2415 netif_addr_unlock_bh(bp->dev);
2416
2417 /* re-read iscsi info */
2418 bnx2x_get_iscsi_info(bp);
2419 bnx2x_setup_cnic_irq_info(bp);
2420 bnx2x_setup_cnic_info(bp);
2421 bp->cnic_loaded = true;
2422 if (bp->state == BNX2X_STATE_OPEN)
2423 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2424
2425
2426 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2427
2428 return 0;
2429
2430#ifndef BNX2X_STOP_ON_ERROR
2431load_error_cnic2:
2432 /* Disable Timer scan */
2433 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2434
2435load_error_cnic1:
2436 bnx2x_napi_disable_cnic(bp);
2437 /* Update the number of queues without the cnic queues */
2438 rc = bnx2x_set_real_num_queues(bp, 0);
2439 if (rc)
2440 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2441load_error_cnic0:
2442 BNX2X_ERR("CNIC-related load failed\n");
2443 bnx2x_free_fp_mem_cnic(bp);
2444 bnx2x_free_mem_cnic(bp);
2445 return rc;
2446#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2447}
2448
9f6c9258
DK
2449/* must be called with rtnl_lock */
2450int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2451{
619c5cb6 2452 int port = BP_PORT(bp);
ad5afc89 2453 int i, rc = 0, load_code = 0;
9f6c9258 2454
55c11941
MS
2455 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2456 DP(NETIF_MSG_IFUP,
2457 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2458
9f6c9258 2459#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2460 if (unlikely(bp->panic)) {
2461 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2462 return -EPERM;
51c1a580 2463 }
9f6c9258
DK
2464#endif
2465
2466 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2467
2ae17f66
VZ
2468 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2469 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2470 &bp->last_reported_link.link_report_flags);
2ae17f66 2471
ad5afc89
AE
2472 if (IS_PF(bp))
2473 /* must be called before memory allocation and HW init */
2474 bnx2x_ilt_set_info(bp);
523224a3 2475
6383c0b3
AE
2476 /*
2477 * Zero fastpath structures preserving invariants like napi, which are
2478 * allocated only once, fp index, max_cos, bp pointer.
65565884 2479 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2480 */
51c1a580 2481 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2482 for_each_queue(bp, i)
2483 bnx2x_bz_fp(bp, i);
55c11941
MS
2484 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2485 bp->num_cnic_queues) *
2486 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2487
55c11941 2488 bp->fcoe_init = false;
6383c0b3 2489
a8c94b91
VZ
2490 /* Set the receive queues buffer size */
2491 bnx2x_set_rx_buf_size(bp);
2492
ad5afc89
AE
2493 if (IS_PF(bp)) {
2494 rc = bnx2x_alloc_mem(bp);
2495 if (rc) {
2496 BNX2X_ERR("Unable to allocate bp memory\n");
2497 return rc;
2498 }
2499 }
2500
2501 /* Allocate memory for FW statistics */
2502 if (bnx2x_alloc_fw_stats_mem(bp))
2503 LOAD_ERROR_EXIT(bp, load_error0);
2504
2505 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2506 * of memory available for RSS queues
2507 */
2508 rc = bnx2x_alloc_fp_mem(bp);
2509 if (rc) {
2510 BNX2X_ERR("Unable to allocate memory for fps\n");
2511 LOAD_ERROR_EXIT(bp, load_error0);
2512 }
d6214d7a 2513
8d9ac297
AE
2514 /* request pf to initialize status blocks */
2515 if (IS_VF(bp)) {
2516 rc = bnx2x_vfpf_init(bp);
2517 if (rc)
2518 LOAD_ERROR_EXIT(bp, load_error0);
2519 }
2520
b3b83c3f
DK
2521 /* As long as bnx2x_alloc_mem() may possibly update
2522 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2523 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2524 */
55c11941 2525 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2526 if (rc) {
ec6ba945 2527 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2528 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2529 }
2530
6383c0b3
AE
2531 /* configure multi cos mappings in kernel.
2532 * this configuration may be overridden by a multi class queue discipline
2533 * or by a dcbx negotiation result.
2534 */
2535 bnx2x_setup_tc(bp->dev, bp->max_cos);
2536
26614ba5
MS
2537 /* Add all NAPI objects */
2538 bnx2x_add_all_napi(bp);
55c11941 2539 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2540 bnx2x_napi_enable(bp);
2541
ad5afc89
AE
2542 if (IS_PF(bp)) {
2543 /* set pf load just before approaching the MCP */
2544 bnx2x_set_pf_load(bp);
2545
2546 /* if mcp exists send load request and analyze response */
2547 if (!BP_NOMCP(bp)) {
2548 /* attempt to load pf */
2549 rc = bnx2x_nic_load_request(bp, &load_code);
2550 if (rc)
2551 LOAD_ERROR_EXIT(bp, load_error1);
2552
2553 /* what did mcp say? */
2554 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2555 if (rc) {
2556 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2557 LOAD_ERROR_EXIT(bp, load_error2);
2558 }
ad5afc89
AE
2559 } else {
2560 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2561 }
9f6c9258 2562
ad5afc89
AE
2563 /* mark pmf if applicable */
2564 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2565
ad5afc89
AE
2566 /* Init Function state controlling object */
2567 bnx2x__init_func_obj(bp);
6383c0b3 2568
ad5afc89
AE
2569 /* Initialize HW */
2570 rc = bnx2x_init_hw(bp, load_code);
2571 if (rc) {
2572 BNX2X_ERR("HW init failed, aborting\n");
2573 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2574 LOAD_ERROR_EXIT(bp, load_error2);
2575 }
9f6c9258
DK
2576 }
2577
d6214d7a
DK
2578 /* Connect to IRQs */
2579 rc = bnx2x_setup_irqs(bp);
523224a3 2580 if (rc) {
ad5afc89
AE
2581 BNX2X_ERR("setup irqs failed\n");
2582 if (IS_PF(bp))
2583 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2584 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2585 }
2586
9f6c9258
DK
2587 /* Setup NIC internals and enable interrupts */
2588 bnx2x_nic_init(bp, load_code);
2589
619c5cb6 2590 /* Init per-function objects */
ad5afc89
AE
2591 if (IS_PF(bp)) {
2592 bnx2x_init_bp_objs(bp);
b56e9670 2593 bnx2x_iov_nic_init(bp);
a3348722 2594
ad5afc89
AE
2595 /* Set AFEX default VLAN tag to an invalid value */
2596 bp->afex_def_vlan_tag = -1;
2597 bnx2x_nic_load_afex_dcc(bp, load_code);
2598 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2599 rc = bnx2x_func_start(bp);
2600 if (rc) {
2601 BNX2X_ERR("Function start failed!\n");
2602 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2603
619c5cb6 2604 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2605 }
9f6c9258 2606
ad5afc89
AE
2607 /* Send LOAD_DONE command to MCP */
2608 if (!BP_NOMCP(bp)) {
2609 load_code = bnx2x_fw_command(bp,
2610 DRV_MSG_CODE_LOAD_DONE, 0);
2611 if (!load_code) {
2612 BNX2X_ERR("MCP response failure, aborting\n");
2613 rc = -EBUSY;
2614 LOAD_ERROR_EXIT(bp, load_error3);
2615 }
2616 }
9f6c9258 2617
ad5afc89
AE
2618 /* setup the leading queue */
2619 rc = bnx2x_setup_leading(bp);
51c1a580 2620 if (rc) {
ad5afc89 2621 BNX2X_ERR("Setup leading failed!\n");
55c11941 2622 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2623 }
523224a3 2624
ad5afc89
AE
2625 /* set up the rest of the queues */
2626 for_each_nondefault_eth_queue(bp, i) {
2627 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2628 if (rc) {
2629 BNX2X_ERR("Queue setup failed\n");
2630 LOAD_ERROR_EXIT(bp, load_error3);
2631 }
2632 }
2633
2634 /* setup rss */
2635 rc = bnx2x_init_rss_pf(bp);
2636 if (rc) {
2637 BNX2X_ERR("PF RSS init failed\n");
2638 LOAD_ERROR_EXIT(bp, load_error3);
2639 }
8d9ac297
AE
2640
2641 } else { /* vf */
2642 for_each_eth_queue(bp, i) {
2643 rc = bnx2x_vfpf_setup_q(bp, i);
2644 if (rc) {
2645 BNX2X_ERR("Queue setup failed\n");
2646 LOAD_ERROR_EXIT(bp, load_error3);
2647 }
2648 }
51c1a580 2649 }
619c5cb6 2650
523224a3
DK
2651 /* Now that Clients are configured we are ready to work */
2652 bp->state = BNX2X_STATE_OPEN;
2653
619c5cb6 2654 /* Configure a ucast MAC */
ad5afc89
AE
2655 if (IS_PF(bp))
2656 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297
AE
2657 else /* vf */
2658 rc = bnx2x_vfpf_set_mac(bp);
51c1a580
MS
2659 if (rc) {
2660 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2661 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2662 }
6e30dd4e 2663
ad5afc89 2664 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2665 bnx2x_update_max_mf_config(bp, bp->pending_max);
2666 bp->pending_max = 0;
2667 }
2668
ad5afc89
AE
2669 if (bp->port.pmf) {
2670 rc = bnx2x_initial_phy_init(bp, load_mode);
2671 if (rc)
2672 LOAD_ERROR_EXIT(bp, load_error3);
2673 }
c63da990 2674 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2675
619c5cb6
VZ
2676 /* Start fast path */
2677
2678 /* Initialize Rx filter. */
2679 netif_addr_lock_bh(bp->dev);
6e30dd4e 2680 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2681 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2682
619c5cb6 2683 /* Start the Tx */
9f6c9258
DK
2684 switch (load_mode) {
2685 case LOAD_NORMAL:
523224a3
DK
2686 /* Tx queue should only be re-enabled */
2687 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2688 break;
2689
2690 case LOAD_OPEN:
2691 netif_tx_start_all_queues(bp->dev);
523224a3 2692 smp_mb__after_clear_bit();
9f6c9258
DK
2693 break;
2694
2695 case LOAD_DIAG:
8970b2e4 2696 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2697 bp->state = BNX2X_STATE_DIAG;
2698 break;
2699
2700 default:
2701 break;
2702 }
2703
00253a8c 2704 if (bp->port.pmf)
4c704899 2705 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2706 else
9f6c9258
DK
2707 bnx2x__link_status_update(bp);
2708
2709 /* start the timer */
2710 mod_timer(&bp->timer, jiffies + bp->current_interval);
2711
55c11941
MS
2712 if (CNIC_ENABLED(bp))
2713 bnx2x_load_cnic(bp);
9f6c9258 2714
ad5afc89
AE
2715 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2716 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2717 u32 val;
2718 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2719 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2720 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2721 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2722 }
2723
619c5cb6 2724 /* Wait for all pending SP commands to complete */
ad5afc89 2725 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2726 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2727 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2728 return -EBUSY;
2729 }
6891dd25 2730
9876879f
BW
2731 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2732 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2733 bnx2x_dcbx_init(bp, false);
2734
55c11941
MS
2735 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2736
9f6c9258
DK
2737 return 0;
2738
619c5cb6 2739#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2740load_error3:
ad5afc89
AE
2741 if (IS_PF(bp)) {
2742 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2743
ad5afc89
AE
2744 /* Clean queueable objects */
2745 bnx2x_squeeze_objects(bp);
2746 }
619c5cb6 2747
9f6c9258
DK
2748 /* Free SKBs, SGEs, TPA pool and driver internals */
2749 bnx2x_free_skbs(bp);
ec6ba945 2750 for_each_rx_queue(bp, i)
9f6c9258 2751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2752
9f6c9258 2753 /* Release IRQs */
d6214d7a
DK
2754 bnx2x_free_irq(bp);
2755load_error2:
ad5afc89 2756 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2757 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2759 }
2760
2761 bp->port.pmf = 0;
9f6c9258
DK
2762load_error1:
2763 bnx2x_napi_disable(bp);
ad5afc89 2764
889b9af3 2765 /* clear pf_load status, as it was already set */
ad5afc89
AE
2766 if (IS_PF(bp))
2767 bnx2x_clear_pf_load(bp);
d6214d7a 2768load_error0:
ad5afc89
AE
2769 bnx2x_free_fp_mem(bp);
2770 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2771 bnx2x_free_mem(bp);
2772
2773 return rc;
619c5cb6 2774#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2775}
2776
ad5afc89
AE
2777static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2778{
2779 u8 rc = 0, cos, i;
2780
2781 /* Wait until tx fastpath tasks complete */
2782 for_each_tx_queue(bp, i) {
2783 struct bnx2x_fastpath *fp = &bp->fp[i];
2784
2785 for_each_cos_in_tx_queue(fp, cos)
2786 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2787 if (rc)
2788 return rc;
2789 }
2790 return 0;
2791}
2792
9f6c9258 2793/* must be called with rtnl_lock */
5d07d868 2794int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2795{
2796 int i;
c9ee9206
VZ
2797 bool global = false;
2798
55c11941
MS
2799 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2800
9ce392d4 2801 /* mark driver is unloaded in shmem2 */
ad5afc89 2802 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2803 u32 val;
2804 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2805 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2806 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2807 }
2808
80bfe5cc 2809 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2810 (bp->state == BNX2X_STATE_CLOSED ||
2811 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2812 /* We can get here if the driver has been unloaded
2813 * during parity error recovery and is either waiting for a
2814 * leader to complete or for other functions to unload and
2815 * then ifdown has been issued. In this case we want to
2816 * unload and let other functions complete a recovery
2817 * process.
2818 */
9f6c9258
DK
2819 bp->recovery_state = BNX2X_RECOVERY_DONE;
2820 bp->is_leader = 0;
c9ee9206
VZ
2821 bnx2x_release_leader_lock(bp);
2822 smp_mb();
2823
51c1a580
MS
2824 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2825 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2826 return -EINVAL;
2827 }
2828
80bfe5cc
YM
2829 /* Nothing to do during unload if previous bnx2x_nic_load()
2830 * has not completed successfully - all resources are released.
2831 *
2832 * we can get here only after unsuccessful ndo_* callback, during which
2833 * dev->IFF_UP flag is still on.
2834 */
2835 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2836 return 0;
2837
2838 /* It's important to set the bp->state to a value different from
87b7ba3d
VZ
2839 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2840 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2841 */
2842 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2843 smp_mb();
2844
55c11941
MS
2845 if (CNIC_LOADED(bp))
2846 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2847
9505ee37
VZ
2848 /* Stop Tx */
2849 bnx2x_tx_disable(bp);
65565884 2850 netdev_reset_tc(bp->dev);
9505ee37 2851
9f6c9258 2852 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2853
9f6c9258 2854 del_timer_sync(&bp->timer);
f85582f8 2855
ad5afc89
AE
2856 if (IS_PF(bp)) {
2857 /* Set ALWAYS_ALIVE bit in shmem */
2858 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2859 bnx2x_drv_pulse(bp);
2860 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2861 bnx2x_save_statistics(bp);
2862 }
9f6c9258 2863
ad5afc89
AE
2864 /* wait till consumers catch up with producers in all queues */
2865 bnx2x_drain_tx_queues(bp);
9f6c9258 2866
9b176b6b
AE
2867 /* if VF, indicate to PF that this function is going down (PF will delete sp
2868 * elements and clear initializations)
2869 */
2870 if (IS_VF(bp))
2871 bnx2x_vfpf_close_vf(bp);
2872 else if (unload_mode != UNLOAD_RECOVERY)
2873 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2874 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2875 else {
c9ee9206
VZ
2876 /* Send the UNLOAD_REQUEST to the MCP */
2877 bnx2x_send_unload_req(bp, unload_mode);
2878
2879 /*
2880 * Prevent transactions to host from the functions on the
2881 * engine that doesn't reset global blocks in case of global
2882 * attention once global blocks are reset and gates are opened
2883 * (the engine whose leader will perform the recovery
2884 * last).
2885 */
2886 if (!CHIP_IS_E1x(bp))
2887 bnx2x_pf_disable(bp);
2888
2889 /* Disable HW interrupts, NAPI */
523224a3 2890 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2891 /* Delete all NAPI objects */
2892 bnx2x_del_all_napi(bp);
55c11941
MS
2893 if (CNIC_LOADED(bp))
2894 bnx2x_del_all_napi_cnic(bp);
523224a3 2895 /* Release IRQs */
d6214d7a 2896 bnx2x_free_irq(bp);
c9ee9206
VZ
2897
2898 /* Report UNLOAD_DONE to MCP */
5d07d868 2899 bnx2x_send_unload_done(bp, false);
523224a3 2900 }
9f6c9258 2901
619c5cb6
VZ
2902 /*
2903 * At this stage no more interrupts will arrive so we may safely clean
2904 * the queueable objects here in case they failed to get cleaned so far.
2905 */
ad5afc89
AE
2906 if (IS_PF(bp))
2907 bnx2x_squeeze_objects(bp);
619c5cb6 2908
79616895
VZ
2909 /* There should be no more pending SP commands at this stage */
2910 bp->sp_state = 0;
2911
9f6c9258
DK
2912 bp->port.pmf = 0;
2913
2914 /* Free SKBs, SGEs, TPA pool and driver internals */
2915 bnx2x_free_skbs(bp);
55c11941
MS
2916 if (CNIC_LOADED(bp))
2917 bnx2x_free_skbs_cnic(bp);
ec6ba945 2918 for_each_rx_queue(bp, i)
9f6c9258 2919 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2920
ad5afc89
AE
2921 bnx2x_free_fp_mem(bp);
2922 if (CNIC_LOADED(bp))
55c11941 2923 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2924
ad5afc89
AE
2925 if (IS_PF(bp)) {
2926 bnx2x_free_mem(bp);
2927 if (CNIC_LOADED(bp))
2928 bnx2x_free_mem_cnic(bp);
2929 }
9f6c9258 2930 bp->state = BNX2X_STATE_CLOSED;
55c11941 2931 bp->cnic_loaded = false;
9f6c9258 2932
c9ee9206
VZ
2933 /* Check if there are pending parity attentions. If there are - set
2934 * RECOVERY_IN_PROGRESS.
2935 */
ad5afc89 2936 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2937 bnx2x_set_reset_in_progress(bp);
2938
2939 /* Set RESET_IS_GLOBAL if needed */
2940 if (global)
2941 bnx2x_set_reset_global(bp);
2942 }
2943
2944
9f6c9258
DK
2945 /* The last driver must disable a "close the gate" if there is no
2946 * parity attention or "process kill" pending.
2947 */
ad5afc89
AE
2948 if (IS_PF(bp) &&
2949 !bnx2x_clear_pf_load(bp) &&
2950 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2951 bnx2x_disable_close_the_gate(bp);
2952
55c11941
MS
2953 DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
2954
9f6c9258
DK
2955 return 0;
2956}
f85582f8 2957
9f6c9258
DK
2958int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2959{
2960 u16 pmcsr;
2961
adf5f6a1
DK
2962 /* If there is no power capability, silently succeed */
2963 if (!bp->pm_cap) {
51c1a580 2964 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2965 return 0;
2966 }
2967
9f6c9258
DK
2968 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2969
2970 switch (state) {
2971 case PCI_D0:
2972 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2973 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2974 PCI_PM_CTRL_PME_STATUS));
2975
2976 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2977 /* delay required during transition out of D3hot */
2978 msleep(20);
2979 break;
2980
2981 case PCI_D3hot:
2982 /* If there are other clients above don't
2983 shut down the power */
2984 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2985 return 0;
2986 /* Don't shut down the power for emulation and FPGA */
2987 if (CHIP_REV_IS_SLOW(bp))
2988 return 0;
2989
2990 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2991 pmcsr |= 3;
2992
2993 if (bp->wol)
2994 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2995
2996 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2997 pmcsr);
2998
2999 /* No more memory access after this point until
3000 * device is brought back to D0.
3001 */
3002 break;
3003
3004 default:
51c1a580 3005 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3006 return -EINVAL;
3007 }
3008 return 0;
3009}
3010
9f6c9258
DK
3011/*
3012 * net_device service functions
3013 */
d6214d7a 3014int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3015{
3016 int work_done = 0;
6383c0b3 3017 u8 cos;
9f6c9258
DK
3018 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3019 napi);
3020 struct bnx2x *bp = fp->bp;
3021
3022 while (1) {
3023#ifdef BNX2X_STOP_ON_ERROR
3024 if (unlikely(bp->panic)) {
3025 napi_complete(napi);
3026 return 0;
3027 }
3028#endif
3029
6383c0b3 3030 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3031 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3032 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3033
9f6c9258
DK
3034 if (bnx2x_has_rx_work(fp)) {
3035 work_done += bnx2x_rx_int(fp, budget - work_done);
3036
3037 /* must not complete if we consumed full budget */
3038 if (work_done >= budget)
3039 break;
3040 }
3041
3042 /* Fall out from the NAPI loop if needed */
3043 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3044
ec6ba945
VZ
3045 /* No need to update SB for FCoE L2 ring as long as
3046 * it's connected to the default SB and the SB
3047 * has been updated when NAPI was scheduled.
3048 */
3049 if (IS_FCOE_FP(fp)) {
3050 napi_complete(napi);
3051 break;
3052 }
9f6c9258 3053 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3054 /* bnx2x_has_rx_work() reads the status block,
3055 * thus we need to ensure that status block indices
3056 * have been actually read (bnx2x_update_fpsb_idx)
3057 * prior to this check (bnx2x_has_rx_work) so that
3058 * we won't write the "newer" value of the status block
3059 * to IGU (if there was a DMA right after
3060 * bnx2x_has_rx_work and if there is no rmb, the memory
3061 * reading (bnx2x_update_fpsb_idx) may be postponed
3062 * to right before bnx2x_ack_sb). In this case there
3063 * will never be another interrupt until there is
3064 * another update of the status block, while there
3065 * is still unhandled work.
3066 */
9f6c9258
DK
3067 rmb();
3068
3069 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3070 napi_complete(napi);
3071 /* Re-enable interrupts */
51c1a580 3072 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3073 "Update index to %d\n", fp->fp_hc_idx);
3074 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3075 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3076 IGU_INT_ENABLE, 1);
3077 break;
3078 }
3079 }
3080 }
3081
3082 return work_done;
3083}
3084
9f6c9258
DK
3085/* we split the first BD into headers and data BDs
3086 * to ease the pain of our fellow microcode engineers
3087 * we use one mapping for both BDs
9f6c9258 3088 */
91226790
DK
3089static u16 bnx2x_tx_split(struct bnx2x *bp,
3090 struct bnx2x_fp_txdata *txdata,
3091 struct sw_tx_bd *tx_buf,
3092 struct eth_tx_start_bd **tx_bd, u16 hlen,
3093 u16 bd_prod)
9f6c9258
DK
3094{
3095 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3096 struct eth_tx_bd *d_tx_bd;
3097 dma_addr_t mapping;
3098 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3099
3100 /* first fix first BD */
9f6c9258
DK
3101 h_tx_bd->nbytes = cpu_to_le16(hlen);
3102
91226790
DK
3103 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3104 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3105
3106 /* now get a new data BD
3107 * (after the pbd) and fill it */
3108 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3109 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3110
3111 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3112 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3113
3114 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3115 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3116 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3117
3118 /* this marks the BD as one that has no individual mapping */
3119 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3120
3121 DP(NETIF_MSG_TX_QUEUED,
3122 "TSO split data size is %d (%x:%x)\n",
3123 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3124
3125 /* update tx_bd */
3126 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3127
3128 return bd_prod;
3129}
3130
86564c3f
YM
3131#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3132#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
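/* Adjust @csum when the checksum was computed starting @fix bytes away from
 * the transport header: fold out the partial sum of the @fix bytes preceding
 * the transport header (fix > 0), or fold in the sum of the -@fix bytes
 * following it (fix < 0), then return the folded result byte-swapped for the
 * parsing BD.
 */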
91226790 3133static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3134{
86564c3f
YM
3135 __sum16 tsum = (__force __sum16) csum;
3136
9f6c9258 3137 if (fix > 0)
86564c3f
YM
3138 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3139 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3140
3141 else if (fix < 0)
86564c3f
YM
3142 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3143 csum_partial(t_header, -fix, 0)));
9f6c9258 3144
e2593fcd 3145 return bswab16(tsum);
9f6c9258
DK
3146}
3147
91226790 3148static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3149{
3150 u32 rc;
3151
3152 if (skb->ip_summed != CHECKSUM_PARTIAL)
3153 rc = XMIT_PLAIN;
3154
3155 else {
d0d9d8ef 3156 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
3157 rc = XMIT_CSUM_V6;
3158 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3159 rc |= XMIT_CSUM_TCP;
3160
3161 } else {
3162 rc = XMIT_CSUM_V4;
3163 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3164 rc |= XMIT_CSUM_TCP;
3165 }
3166 }
3167
5892b9e9
VZ
3168 if (skb_is_gso_v6(skb))
3169 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3170 else if (skb_is_gso(skb))
3171 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
3172
3173 return rc;
3174}
3175
3176#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3177/* check if packet requires linearization (packet is too fragmented)
3178 no need to check fragmentation if page size > 8K (there will be no
3179 violation of FW restrictions) */
3180static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3181 u32 xmit_type)
3182{
3183 int to_copy = 0;
3184 int hlen = 0;
3185 int first_bd_sz = 0;
3186
3187 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3188 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3189
3190 if (xmit_type & XMIT_GSO) {
3191 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3192 /* Check if LSO packet needs to be copied:
3193 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3194 int wnd_size = MAX_FETCH_BD - 3;
3195 /* Number of windows to check */
3196 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3197 int wnd_idx = 0;
3198 int frag_idx = 0;
3199 u32 wnd_sum = 0;
3200
3201 /* Headers length */
3202 hlen = (int)(skb_transport_header(skb) - skb->data) +
3203 tcp_hdrlen(skb);
3204
3205 /* Amount of data (w/o headers) on linear part of SKB*/
3206 first_bd_sz = skb_headlen(skb) - hlen;
3207
3208 wnd_sum = first_bd_sz;
3209
3210 /* Calculate the first sum - it's special */
3211 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3212 wnd_sum +=
9e903e08 3213 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3214
3215 /* If there was data on linear skb data - check it */
3216 if (first_bd_sz > 0) {
3217 if (unlikely(wnd_sum < lso_mss)) {
3218 to_copy = 1;
3219 goto exit_lbl;
3220 }
3221
3222 wnd_sum -= first_bd_sz;
3223 }
3224
3225 /* Others are easier: run through the frag list and
3226 check all windows */
3227 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3228 wnd_sum +=
9e903e08 3229 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3230
3231 if (unlikely(wnd_sum < lso_mss)) {
3232 to_copy = 1;
3233 break;
3234 }
3235 wnd_sum -=
9e903e08 3236 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3237 }
3238 } else {
3239 /* in the non-LSO case a too fragmented packet should always
3240 be linearized */
3241 to_copy = 1;
3242 }
3243 }
3244
3245exit_lbl:
3246 if (unlikely(to_copy))
3247 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3248 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3249 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3250 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3251
3252 return to_copy;
3253}
3254#endif
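/*
 * Illustrative example (hypothetical MSS): the FW fetches at most
 * MAX_FETCH_BD BDs per packet, three of which are consumed by the headers
 * BD, the parsing BD and the last BD, leaving wnd_size data BDs. If
 * bnx2x_pkt_req_lin() above finds any window of wnd_size consecutive
 * fragments (plus the linear data for the first window) that carries less
 * than one gso_size of payload, a single MSS would need more BDs than the
 * FW can fetch, so the skb is linearized before transmission.
 */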
3255
91226790
DK
3256static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3257 u32 xmit_type)
f2e0899f 3258{
2297a2da
VZ
3259 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3260 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3261 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
3262 if ((xmit_type & XMIT_GSO_V6) &&
3263 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 3264 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3265}
3266
3267/**
e8920674 3268 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3269 *
e8920674
DK
3270 * @skb: packet skb
3271 * @pbd: parse BD
3272 * @xmit_type: xmit flags
f2e0899f 3273 */
91226790
DK
3274static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3275 struct eth_tx_parse_bd_e1x *pbd,
3276 u32 xmit_type)
f2e0899f
DK
3277{
3278 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3279 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3280 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3281
3282 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3283 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3284 pbd->tcp_pseudo_csum =
86564c3f
YM
3285 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3286 ip_hdr(skb)->daddr,
3287 0, IPPROTO_TCP, 0));
f2e0899f
DK
3288
3289 } else
3290 pbd->tcp_pseudo_csum =
86564c3f
YM
3291 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3292 &ipv6_hdr(skb)->daddr,
3293 0, IPPROTO_TCP, 0));
f2e0899f 3294
86564c3f
YM
3295 pbd->global_data |=
3296 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3297}
f85582f8 3298
f2e0899f 3299/**
e8920674 3300 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3301 *
e8920674
DK
3302 * @bp: driver handle
3303 * @skb: packet skb
3304 * @parsing_data: data to be updated
3305 * @xmit_type: xmit flags
f2e0899f 3306 *
91226790 3307 * 57712/578xx related
f2e0899f 3308 */
91226790
DK
3309static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3310 u32 *parsing_data, u32 xmit_type)
f2e0899f 3311{
e39aece7 3312 *parsing_data |=
2de67439 3313 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3314 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3315 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3316
e39aece7
VZ
3317 if (xmit_type & XMIT_CSUM_TCP) {
3318 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3319 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3320 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3321
e39aece7 3322 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3323 }
3324 /* We support checksum offload for TCP and UDP only.
3325 * No need to pass the UDP header length - it's a constant.
3326 */
3327 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3328}
3329
91226790
DK
3330static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3331 struct eth_tx_start_bd *tx_start_bd,
3332 u32 xmit_type)
93ef5c02 3333{
93ef5c02
DK
3334 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3335
91226790
DK
3336 if (xmit_type & XMIT_CSUM_V6)
3337 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3338
3339 if (!(xmit_type & XMIT_CSUM_TCP))
3340 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3341}
3342
f2e0899f 3343/**
e8920674 3344 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3345 *
e8920674
DK
3346 * @bp: driver handle
3347 * @skb: packet skb
3348 * @pbd: parse BD to be updated
3349 * @xmit_type: xmit flags
f2e0899f 3350 */
91226790
DK
3351static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3352 struct eth_tx_parse_bd_e1x *pbd,
3353 u32 xmit_type)
f2e0899f 3354{
e39aece7 3355 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3356
3357 /* for now NS flag is not used in Linux */
3358 pbd->global_data =
86564c3f
YM
3359 cpu_to_le16(hlen |
3360 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3361 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3362
3363 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3364 skb_network_header(skb)) >> 1;
f2e0899f 3365
e39aece7
VZ
3366 hlen += pbd->ip_hlen_w;
3367
3368 /* We support checksum offload for TCP and UDP only */
3369 if (xmit_type & XMIT_CSUM_TCP)
3370 hlen += tcp_hdrlen(skb) / 2;
3371 else
3372 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3373
3374 pbd->total_hlen_w = cpu_to_le16(hlen);
3375 hlen = hlen*2;
3376
3377 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3378 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3379
3380 } else {
3381 s8 fix = SKB_CS_OFF(skb); /* signed! */
3382
3383 DP(NETIF_MSG_TX_QUEUED,
3384 "hlen %d fix %d csum before fix %x\n",
3385 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3386
3387 /* HW bug: fixup the CSUM */
3388 pbd->tcp_pseudo_csum =
3389 bnx2x_csum_fix(skb_transport_header(skb),
3390 SKB_CS(skb), fix);
3391
3392 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3393 pbd->tcp_pseudo_csum);
3394 }
3395
3396 return hlen;
3397}
f85582f8 3398
9f6c9258
DK
3399/* called with netif_tx_lock
3400 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3401 * netif_wake_queue()
3402 */
3403netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3404{
3405 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3406
9f6c9258 3407 struct netdev_queue *txq;
6383c0b3 3408 struct bnx2x_fp_txdata *txdata;
9f6c9258 3409 struct sw_tx_bd *tx_buf;
619c5cb6 3410 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3411 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3412 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3413 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 3414 u32 pbd_e2_parsing_data = 0;
9f6c9258 3415 u16 pkt_prod, bd_prod;
65565884 3416 int nbd, txq_index;
9f6c9258
DK
3417 dma_addr_t mapping;
3418 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3419 int i;
3420 u8 hlen = 0;
3421 __le16 pkt_size = 0;
3422 struct ethhdr *eth;
3423 u8 mac_type = UNICAST_ADDRESS;
3424
3425#ifdef BNX2X_STOP_ON_ERROR
3426 if (unlikely(bp->panic))
3427 return NETDEV_TX_BUSY;
3428#endif
3429
6383c0b3
AE
3430 txq_index = skb_get_queue_mapping(skb);
3431 txq = netdev_get_tx_queue(dev, txq_index);
3432
55c11941 3433 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3434
65565884 3435 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3436
3437 /* enable this debug print to view the transmission queue being used
51c1a580 3438 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3439 txq_index, fp_index, txdata_index); */
9f6c9258 3440
6383c0b3 3441 /* enable this debug print to view the tranmission details
51c1a580
MS
3442 DP(NETIF_MSG_TX_QUEUED,
3443 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3444 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3445
6383c0b3 3446 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3447 skb_shinfo(skb)->nr_frags +
3448 BDS_PER_TX_PKT +
3449 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3450 /* Handle special storage cases separately */
c96bdc0c
DK
3451 if (txdata->tx_ring_size == 0) {
3452 struct bnx2x_eth_q_stats *q_stats =
3453 bnx2x_fp_qstats(bp, txdata->parent_fp);
3454 q_stats->driver_filtered_tx_pkt++;
3455 dev_kfree_skb(skb);
3456 return NETDEV_TX_OK;
3457 }
2de67439
YM
3458 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3459 netif_tx_stop_queue(txq);
c96bdc0c 3460 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3461
9f6c9258
DK
3462 return NETDEV_TX_BUSY;
3463 }
3464
51c1a580 3465 DP(NETIF_MSG_TX_QUEUED,
04c46736 3466 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3467 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3468 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3469 skb->len);
9f6c9258
DK
3470
3471 eth = (struct ethhdr *)skb->data;
3472
3473 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3474 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3475 if (is_broadcast_ether_addr(eth->h_dest))
3476 mac_type = BROADCAST_ADDRESS;
3477 else
3478 mac_type = MULTICAST_ADDRESS;
3479 }
3480
91226790 3481#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3482 /* First, check if we need to linearize the skb (due to FW
3483 restrictions). No need to check fragmentation if page size > 8K
 3484	   (there will be no violation of FW restrictions) */
3485 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3486 /* Statistics of linearization */
3487 bp->lin_cnt++;
3488 if (skb_linearize(skb) != 0) {
51c1a580
MS
3489 DP(NETIF_MSG_TX_QUEUED,
3490 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3491 dev_kfree_skb_any(skb);
3492 return NETDEV_TX_OK;
3493 }
3494 }
3495#endif
619c5cb6
VZ
3496 /* Map skb linear data for DMA */
3497 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3498 skb_headlen(skb), DMA_TO_DEVICE);
3499 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3500 DP(NETIF_MSG_TX_QUEUED,
3501 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3502 dev_kfree_skb_any(skb);
3503 return NETDEV_TX_OK;
3504 }
9f6c9258
DK
3505 /*
3506 Please read carefully. First we use one BD which we mark as start,
3507 then we have a parsing info BD (used for TSO or xsum),
3508 and only then we have the rest of the TSO BDs.
3509 (don't forget to mark the last one as last,
3510 and to unmap only AFTER you write to the BD ...)
 3511	And above all, all PBD sizes are in words - NOT DWORDS!
3512 */
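	/* The resulting BD chain for a single packet therefore looks like:
	 *   start BD -> parsing BD (e1x or e2) -> data BD ... last data BD
	 */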
3513
619c5cb6
VZ
3514 /* get current pkt produced now - advance it just before sending packet
3515 * since mapping of pages may fail and cause packet to be dropped
3516 */
6383c0b3
AE
3517 pkt_prod = txdata->tx_pkt_prod;
3518 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3519
619c5cb6
VZ
3520 /* get a tx_buf and first BD
3521 * tx_start_bd may be changed during SPLIT,
3522 * but first_bd will always stay first
3523 */
6383c0b3
AE
3524 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3525 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3526 first_bd = tx_start_bd;
9f6c9258
DK
3527
3528 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3529
91226790
DK
3530 /* header nbd: indirectly zero other flags! */
3531 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3532
3533 /* remember the first BD of the packet */
6383c0b3 3534 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3535 tx_buf->skb = skb;
3536 tx_buf->flags = 0;
3537
3538 DP(NETIF_MSG_TX_QUEUED,
3539 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3540 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3541
eab6d18d 3542 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3543 tx_start_bd->vlan_or_ethertype =
3544 cpu_to_le16(vlan_tx_tag_get(skb));
3545 tx_start_bd->bd_flags.as_bitfield |=
3546 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3547 } else {
3548 /* when transmitting in a vf, start bd must hold the ethertype
3549 * for fw to enforce it
3550 */
91226790 3551 if (IS_VF(bp))
dc1ba591
AE
3552 tx_start_bd->vlan_or_ethertype =
3553 cpu_to_le16(ntohs(eth->h_proto));
91226790 3554 else
dc1ba591
AE
3555 /* used by FW for packet accounting */
3556 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3557 }
9f6c9258 3558
91226790
DK
3559 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3560
9f6c9258
DK
3561 /* turn on parsing and get a BD */
3562 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3563
93ef5c02
DK
3564 if (xmit_type & XMIT_CSUM)
3565 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3566
619c5cb6 3567 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3568 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
3569 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3570 /* Set PBD in checksum offload case */
3571 if (xmit_type & XMIT_CSUM)
91226790 3572 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3573 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3574 &pbd_e2_parsing_data,
3575 xmit_type);
dc1ba591 3576
91226790
DK
 3577		/* Add the MACs to the parsing BD if this is a VF */
3578 if (IS_VF(bp)) {
3579 /* override GRE parameters in BD */
3580 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3581 &pbd_e2->data.mac_addr.src_mid,
3582 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3583 eth->h_source);
91226790
DK
3584
3585 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3586 &pbd_e2->data.mac_addr.dst_mid,
3587 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3588 eth->h_dest);
3589 }
96bed4b9
YM
3590
3591 SET_FLAG(pbd_e2_parsing_data,
3592 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3593 } else {
96bed4b9 3594 u16 global_data = 0;
6383c0b3 3595 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3596 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3597 /* Set PBD in checksum offload case */
3598 if (xmit_type & XMIT_CSUM)
3599 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3600
96bed4b9
YM
3601 SET_FLAG(global_data,
3602 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3603 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3604 }
3605
f85582f8 3606 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3607 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3608 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3609 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3610 pkt_size = tx_start_bd->nbytes;
3611
51c1a580 3612 DP(NETIF_MSG_TX_QUEUED,
91226790 3613 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3614 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3615 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3616 tx_start_bd->bd_flags.as_bitfield,
3617 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3618
3619 if (xmit_type & XMIT_GSO) {
3620
3621 DP(NETIF_MSG_TX_QUEUED,
3622 "TSO packet len %d hlen %d total len %d tso size %d\n",
3623 skb->len, hlen, skb_headlen(skb),
3624 skb_shinfo(skb)->gso_size);
3625
3626 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3627
91226790
DK
3628 if (unlikely(skb_headlen(skb) > hlen)) {
3629 nbd++;
6383c0b3
AE
3630 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3631 &tx_start_bd, hlen,
91226790
DK
3632 bd_prod);
3633 }
619c5cb6 3634 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3635 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3636 xmit_type);
f2e0899f
DK
3637 else
3638 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3639 }
2297a2da
VZ
3640
3641 /* Set the PBD's parsing_data field if not zero
3642 * (for the chips newer than 57711).
3643 */
3644 if (pbd_e2_parsing_data)
3645 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3646
9f6c9258
DK
3647 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3648
f85582f8 3649 /* Handle fragmented skb */
9f6c9258
DK
3650 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3651 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3652
9e903e08
ED
3653 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3654 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3655 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3656 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3657
51c1a580
MS
3658 DP(NETIF_MSG_TX_QUEUED,
3659 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3660
 3661			/* we need to unmap all buffers already mapped
 3662			 * for this SKB;
 3663			 * first_bd->nbd needs to be properly updated
 3664			 * before the call to bnx2x_free_tx_pkt
3665 */
3666 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3667 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3668 TX_BD(txdata->tx_pkt_prod),
3669 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3670 return NETDEV_TX_OK;
3671 }
3672
9f6c9258 3673 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3674 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3675 if (total_pkt_bd == NULL)
6383c0b3 3676 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3677
9f6c9258
DK
3678 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3679 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3680 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3681 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3682 nbd++;
9f6c9258
DK
3683
3684 DP(NETIF_MSG_TX_QUEUED,
3685 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3686 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3687 le16_to_cpu(tx_data_bd->nbytes));
3688 }
3689
3690 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3691
619c5cb6
VZ
3692 /* update with actual num BDs */
3693 first_bd->nbd = cpu_to_le16(nbd);
3694
9f6c9258
DK
3695 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3696
3697 /* now send a tx doorbell, counting the next BD
3698 * if the packet contains or ends with it
3699 */
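	/* TX_BD_POFF() is the producer offset within the current BD page;
	 * if it is smaller than nbd, the packet wrapped past a page boundary
	 * and a next-page BD was consumed, so it is counted for the doorbell
	 * as well
	 */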
3700 if (TX_BD_POFF(bd_prod) < nbd)
3701 nbd++;
3702
619c5cb6
VZ
3703 /* total_pkt_bytes should be set on the first data BD if
3704 * it's not an LSO packet and there is more than one
3705 * data BD. In this case pkt_size is limited by an MTU value.
3706 * However we prefer to set it for an LSO packet (while we don't
 3707	 * have to) in order to save some CPU cycles in a non-LSO
 3708	 * case, when we care much more about them.
3709 */
9f6c9258
DK
3710 if (total_pkt_bd != NULL)
3711 total_pkt_bd->total_pkt_bytes = pkt_size;
3712
523224a3 3713 if (pbd_e1x)
9f6c9258 3714 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3715 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3716 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3717 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3718 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3719 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3720 if (pbd_e2)
3721 DP(NETIF_MSG_TX_QUEUED,
3722 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3723 pbd_e2,
3724 pbd_e2->data.mac_addr.dst_hi,
3725 pbd_e2->data.mac_addr.dst_mid,
3726 pbd_e2->data.mac_addr.dst_lo,
3727 pbd_e2->data.mac_addr.src_hi,
3728 pbd_e2->data.mac_addr.src_mid,
3729 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3730 pbd_e2->parsing_data);
9f6c9258
DK
3731 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3732
2df1a70a
TH
3733 netdev_tx_sent_queue(txq, skb->len);
3734
8373c57d
WB
3735 skb_tx_timestamp(skb);
3736
6383c0b3 3737 txdata->tx_pkt_prod++;
9f6c9258
DK
3738 /*
3739 * Make sure that the BD data is updated before updating the producer
3740 * since FW might read the BD right after the producer is updated.
3741 * This is only applicable for weak-ordered memory model archs such
 3742	 * as IA-64. The following barrier is also mandatory since FW
 3743	 * assumes packets must have BDs.
3744 */
3745 wmb();
3746
6383c0b3 3747 txdata->tx_db.data.prod += nbd;
9f6c9258 3748 barrier();
f85582f8 3749
6383c0b3 3750 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3751
3752 mmiowb();
3753
6383c0b3 3754 txdata->tx_bd_prod += nbd;
9f6c9258 3755
7df2dc6b 3756 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3757 netif_tx_stop_queue(txq);
3758
3759 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3760 * ordering of set_bit() in netif_tx_stop_queue() and read of
3761 * fp->bd_tx_cons */
3762 smp_mb();
3763
15192a8c 3764 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3765 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3766 netif_tx_wake_queue(txq);
3767 }
6383c0b3 3768 txdata->tx_pkt++;
9f6c9258
DK
3769
3770 return NETDEV_TX_OK;
3771}
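/* Illustrative sketch only (the real table lives in bnx2x_main.c and may
 * differ): bnx2x_start_xmit() and bnx2x_setup_tc() are hooked into the
 * stack through the driver's net_device_ops, roughly:
 *
 *	static const struct net_device_ops bnx2x_netdev_ops = {
 *		...
 *		.ndo_start_xmit	= bnx2x_start_xmit,
 *		.ndo_setup_tc	= bnx2x_setup_tc,
 *		...
 *	};
 */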
f85582f8 3772
6383c0b3
AE
3773/**
3774 * bnx2x_setup_tc - routine to configure net_device for multi tc
3775 *
3776 * @netdev: net device to configure
 3777 * @num_tc:	number of traffic classes to enable
3778 *
3779 * callback connected to the ndo_setup_tc function pointer
3780 */
3781int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3782{
3783 int cos, prio, count, offset;
3784 struct bnx2x *bp = netdev_priv(dev);
3785
3786 /* setup tc must be called under rtnl lock */
3787 ASSERT_RTNL();
3788
3789 /* no traffic classes requested. aborting */
3790 if (!num_tc) {
3791 netdev_reset_tc(dev);
3792 return 0;
3793 }
3794
3795 /* requested to support too many traffic classes */
3796 if (num_tc > bp->max_cos) {
51c1a580
MS
3797 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3798 num_tc, bp->max_cos);
6383c0b3
AE
3799 return -EINVAL;
3800 }
3801
 3802	/* declare the number of supported traffic classes */
3803 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3804 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3805 return -EINVAL;
3806 }
3807
3808 /* configure priority to traffic class mapping */
3809 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3810 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3811 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3812 "mapping priority %d to tc %d\n",
6383c0b3
AE
3813 prio, bp->prio_to_cos[prio]);
3814 }
3815
3816
 3817	/* Use this configuration to differentiate tc0 from other COSes
 3818	   This can be used for ETS or PFC, and save the effort of setting
 3819	   up a multi-class queue disc or negotiating DCBX with a switch
3820 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3821 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3822 for (prio = 1; prio < 16; prio++) {
3823 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3824 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3825 } */
3826
3827 /* configure traffic class to transmission queue mapping */
3828 for (cos = 0; cos < bp->max_cos; cos++) {
3829 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3830 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3831 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3832 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3833 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3834 cos, offset, count);
3835 }
3836
3837 return 0;
3838}
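/* Note: ndo_setup_tc is typically reached via the mqprio qdisc; an
 * illustrative (unverified) user-space trigger would look roughly like:
 *	tc qdisc add dev <ifname> root mqprio num_tc <n> ...
 */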
3839
9f6c9258
DK
3840/* called with rtnl_lock */
3841int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3842{
3843 struct sockaddr *addr = p;
3844 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3845 int rc = 0;
9f6c9258 3846
51c1a580
MS
3847 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3848 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3849 return -EINVAL;
51c1a580 3850 }
614c76df 3851
a3348722
BW
3852 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3853 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3854 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3855 return -EINVAL;
51c1a580 3856 }
9f6c9258 3857
619c5cb6
VZ
3858 if (netif_running(dev)) {
3859 rc = bnx2x_set_eth_mac(bp, false);
3860 if (rc)
3861 return rc;
3862 }
3863
9f6c9258 3864 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3865
523224a3 3866 if (netif_running(dev))
619c5cb6 3867 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3868
619c5cb6 3869 return rc;
9f6c9258
DK
3870}
3871
b3b83c3f
DK
3872static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3873{
3874 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3875 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3876 u8 cos;
b3b83c3f
DK
3877
3878 /* Common */
55c11941 3879
b3b83c3f
DK
3880 if (IS_FCOE_IDX(fp_index)) {
3881 memset(sb, 0, sizeof(union host_hc_status_block));
3882 fp->status_blk_mapping = 0;
b3b83c3f 3883 } else {
b3b83c3f 3884 /* status blocks */
619c5cb6 3885 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3886 BNX2X_PCI_FREE(sb->e2_sb,
3887 bnx2x_fp(bp, fp_index,
3888 status_blk_mapping),
3889 sizeof(struct host_hc_status_block_e2));
3890 else
3891 BNX2X_PCI_FREE(sb->e1x_sb,
3892 bnx2x_fp(bp, fp_index,
3893 status_blk_mapping),
3894 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3895 }
55c11941 3896
b3b83c3f
DK
3897 /* Rx */
3898 if (!skip_rx_queue(bp, fp_index)) {
3899 bnx2x_free_rx_bds(fp);
3900
3901 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3902 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3903 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3904 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3905 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3906
3907 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3908 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3909 sizeof(struct eth_fast_path_rx_cqe) *
3910 NUM_RCQ_BD);
3911
3912 /* SGE ring */
3913 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3914 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3915 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3916 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3917 }
3918
3919 /* Tx */
3920 if (!skip_tx_queue(bp, fp_index)) {
3921 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3922 for_each_cos_in_tx_queue(fp, cos) {
65565884 3923 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3924
51c1a580 3925 DP(NETIF_MSG_IFDOWN,
94f05b0f 3926 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3927 fp_index, cos, txdata->cid);
3928
3929 BNX2X_FREE(txdata->tx_buf_ring);
3930 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3931 txdata->tx_desc_mapping,
3932 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3933 }
b3b83c3f
DK
3934 }
3935 /* end of fastpath */
3936}
3937
55c11941
MS
3938void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3939{
3940 int i;
3941 for_each_cnic_queue(bp, i)
3942 bnx2x_free_fp_mem_at(bp, i);
3943}
3944
b3b83c3f
DK
3945void bnx2x_free_fp_mem(struct bnx2x *bp)
3946{
3947 int i;
55c11941 3948 for_each_eth_queue(bp, i)
b3b83c3f
DK
3949 bnx2x_free_fp_mem_at(bp, i);
3950}
3951
1191cb83 3952static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3953{
3954 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3955 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3956 bnx2x_fp(bp, index, sb_index_values) =
3957 (__le16 *)status_blk.e2_sb->sb.index_values;
3958 bnx2x_fp(bp, index, sb_running_index) =
3959 (__le16 *)status_blk.e2_sb->sb.running_index;
3960 } else {
3961 bnx2x_fp(bp, index, sb_index_values) =
3962 (__le16 *)status_blk.e1x_sb->sb.index_values;
3963 bnx2x_fp(bp, index, sb_running_index) =
3964 (__le16 *)status_blk.e1x_sb->sb.running_index;
3965 }
3966}
3967
1191cb83
ED
3968/* Returns the number of actually allocated BDs */
3969static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3970 int rx_ring_size)
3971{
3972 struct bnx2x *bp = fp->bp;
3973 u16 ring_prod, cqe_ring_prod;
3974 int i, failure_cnt = 0;
3975
3976 fp->rx_comp_cons = 0;
3977 cqe_ring_prod = ring_prod = 0;
3978
 3979	/* This routine is called only during init so
3980 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3981 */
3982 for (i = 0; i < rx_ring_size; i++) {
3983 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3984 failure_cnt++;
3985 continue;
3986 }
3987 ring_prod = NEXT_RX_IDX(ring_prod);
3988 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3989 WARN_ON(ring_prod <= (i - failure_cnt));
3990 }
3991
3992 if (failure_cnt)
3993 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3994 i - failure_cnt, fp->index);
3995
3996 fp->rx_bd_prod = ring_prod;
3997 /* Limit the CQE producer by the CQE ring size */
3998 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3999 cqe_ring_prod);
4000 fp->rx_pkt = fp->rx_calls = 0;
4001
15192a8c 4002 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4003
4004 return i - failure_cnt;
4005}
4006
4007static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4008{
4009 int i;
4010
4011 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4012 struct eth_rx_cqe_next_page *nextpg;
4013
4014 nextpg = (struct eth_rx_cqe_next_page *)
4015 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4016 nextpg->addr_hi =
4017 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4018 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4019 nextpg->addr_lo =
4020 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4021 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4022 }
4023}
4024
b3b83c3f
DK
4025static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4026{
4027 union host_hc_status_block *sb;
4028 struct bnx2x_fastpath *fp = &bp->fp[index];
4029 int ring_size = 0;
6383c0b3 4030 u8 cos;
c2188952 4031 int rx_ring_size = 0;
b3b83c3f 4032
a3348722
BW
4033 if (!bp->rx_ring_size &&
4034 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4035 rx_ring_size = MIN_RX_SIZE_NONTPA;
4036 bp->rx_ring_size = rx_ring_size;
55c11941 4037 } else if (!bp->rx_ring_size) {
c2188952
VZ
4038 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4039
065f8b92
YM
4040 if (CHIP_IS_E3(bp)) {
4041 u32 cfg = SHMEM_RD(bp,
4042 dev_info.port_hw_config[BP_PORT(bp)].
4043 default_cfg);
4044
4045 /* Decrease ring size for 1G functions */
4046 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4047 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4048 rx_ring_size /= 10;
4049 }
d760fc37 4050
c2188952
VZ
 4051		/* allocate at least the number of buffers required by FW */
4052 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4053 MIN_RX_SIZE_TPA, rx_ring_size);
4054
4055 bp->rx_ring_size = rx_ring_size;
614c76df 4056 } else /* if rx_ring_size specified - use it */
c2188952 4057 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4058
04c46736
YM
4059 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4060
b3b83c3f
DK
4061 /* Common */
4062 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4063
b3b83c3f 4064 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4065 /* status blocks */
619c5cb6 4066 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4067 BNX2X_PCI_ALLOC(sb->e2_sb,
4068 &bnx2x_fp(bp, index, status_blk_mapping),
4069 sizeof(struct host_hc_status_block_e2));
4070 else
4071 BNX2X_PCI_ALLOC(sb->e1x_sb,
4072 &bnx2x_fp(bp, index, status_blk_mapping),
4073 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4074 }
8eef2af1
DK
4075
4076 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4077 * set shortcuts for it.
4078 */
4079 if (!IS_FCOE_IDX(index))
4080 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4081
4082 /* Tx */
4083 if (!skip_tx_queue(bp, index)) {
4084 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4085 for_each_cos_in_tx_queue(fp, cos) {
65565884 4086 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4087
51c1a580
MS
4088 DP(NETIF_MSG_IFUP,
4089 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4090 index, cos);
4091
4092 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4093 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4094 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4095 &txdata->tx_desc_mapping,
b3b83c3f 4096 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4097 }
b3b83c3f
DK
4098 }
4099
4100 /* Rx */
4101 if (!skip_rx_queue(bp, index)) {
4102 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4103 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4104 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4105 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4106 &bnx2x_fp(bp, index, rx_desc_mapping),
4107 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4108
4109 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4110 &bnx2x_fp(bp, index, rx_comp_mapping),
4111 sizeof(struct eth_fast_path_rx_cqe) *
4112 NUM_RCQ_BD);
4113
4114 /* SGE ring */
4115 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4116 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4117 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4118 &bnx2x_fp(bp, index, rx_sge_mapping),
4119 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4120 /* RX BD ring */
4121 bnx2x_set_next_page_rx_bd(fp);
4122
4123 /* CQ ring */
4124 bnx2x_set_next_page_rx_cq(fp);
4125
4126 /* BDs */
4127 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4128 if (ring_size < rx_ring_size)
4129 goto alloc_mem_err;
4130 }
4131
4132 return 0;
4133
4134/* handles low memory cases */
4135alloc_mem_err:
4136 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4137 index, ring_size);
 4138	/* FW will drop all packets if the queue is not big enough;
 4139	 * in these cases we disable the queue.
6383c0b3 4140 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4141 */
4142 if (ring_size < (fp->disable_tpa ?
eb722d7a 4143 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4144 /* release memory allocated for this queue */
4145 bnx2x_free_fp_mem_at(bp, index);
4146 return -ENOMEM;
4147 }
4148 return 0;
4149}
4150
55c11941
MS
4151int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4152{
4153 if (!NO_FCOE(bp))
4154 /* FCoE */
4155 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4156			/* we will fail the load process instead of marking
4157 * NO_FCOE_FLAG
4158 */
4159 return -ENOMEM;
4160
4161 return 0;
4162}
4163
b3b83c3f
DK
4164int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4165{
4166 int i;
4167
55c11941
MS
4168 /* 1. Allocate FP for leading - fatal if error
4169 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4170 */
4171
4172 /* leading */
4173 if (bnx2x_alloc_fp_mem_at(bp, 0))
4174 return -ENOMEM;
6383c0b3 4175
b3b83c3f
DK
4176 /* RSS */
4177 for_each_nondefault_eth_queue(bp, i)
4178 if (bnx2x_alloc_fp_mem_at(bp, i))
4179 break;
4180
4181 /* handle memory failures */
4182 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4183 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4184
4185 WARN_ON(delta < 0);
4864a16a 4186 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4187 if (CNIC_SUPPORT(bp))
4188 /* move non eth FPs next to last eth FP
4189 * must be done in that order
4190 * FCOE_IDX < FWD_IDX < OOO_IDX
4191 */
b3b83c3f 4192
55c11941
MS
 4193		/* move FCoE fp even if NO_FCOE_FLAG is on */
4194 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4195 bp->num_ethernet_queues -= delta;
4196 bp->num_queues = bp->num_ethernet_queues +
4197 bp->num_cnic_queues;
b3b83c3f
DK
4198 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4199 bp->num_queues + delta, bp->num_queues);
4200 }
4201
4202 return 0;
4203}
d6214d7a 4204
523224a3
DK
4205void bnx2x_free_mem_bp(struct bnx2x *bp)
4206{
c3146eb6
DK
4207 int i;
4208
4209 for (i = 0; i < bp->fp_array_size; i++)
4210 kfree(bp->fp[i].tpa_info);
523224a3 4211 kfree(bp->fp);
15192a8c
BW
4212 kfree(bp->sp_objs);
4213 kfree(bp->fp_stats);
65565884 4214 kfree(bp->bnx2x_txq);
523224a3
DK
4215 kfree(bp->msix_table);
4216 kfree(bp->ilt);
4217}
4218
0329aba1 4219int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4220{
4221 struct bnx2x_fastpath *fp;
4222 struct msix_entry *tbl;
4223 struct bnx2x_ilt *ilt;
6383c0b3 4224 int msix_table_size = 0;
55c11941 4225 int fp_array_size, txq_array_size;
15192a8c 4226 int i;
6383c0b3
AE
4227
4228 /*
 4229	 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4230 * path IGU SBs plus default SB (for PF only).
6383c0b3 4231 */
1ab4434c
AE
4232 msix_table_size = bp->igu_sb_cnt;
4233 if (IS_PF(bp))
4234 msix_table_size++;
4235 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4236
6383c0b3 4237 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4238 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4239 bp->fp_array_size = fp_array_size;
4240 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4241
c3146eb6 4242 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4243 if (!fp)
4244 goto alloc_err;
c3146eb6 4245 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4246 fp[i].tpa_info =
4247 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4248 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4249 if (!(fp[i].tpa_info))
4250 goto alloc_err;
4251 }
4252
523224a3
DK
4253 bp->fp = fp;
4254
15192a8c 4255 /* allocate sp objs */
c3146eb6 4256 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4257 GFP_KERNEL);
4258 if (!bp->sp_objs)
4259 goto alloc_err;
4260
4261 /* allocate fp_stats */
c3146eb6 4262 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4263 GFP_KERNEL);
4264 if (!bp->fp_stats)
4265 goto alloc_err;
4266
65565884 4267 /* Allocate memory for the transmission queues array */
55c11941
MS
4268 txq_array_size =
4269 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
 4270	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4271
4272 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4273 GFP_KERNEL);
65565884
MS
4274 if (!bp->bnx2x_txq)
4275 goto alloc_err;
4276
523224a3 4277 /* msix table */
01e23742 4278 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4279 if (!tbl)
4280 goto alloc_err;
4281 bp->msix_table = tbl;
4282
4283 /* ilt */
4284 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4285 if (!ilt)
4286 goto alloc_err;
4287 bp->ilt = ilt;
4288
4289 return 0;
4290alloc_err:
4291 bnx2x_free_mem_bp(bp);
4292 return -ENOMEM;
4293
4294}
4295
a9fccec7 4296int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4297{
4298 struct bnx2x *bp = netdev_priv(dev);
4299
4300 if (unlikely(!netif_running(dev)))
4301 return 0;
4302
5d07d868 4303 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4304 return bnx2x_nic_load(bp, LOAD_NORMAL);
4305}
4306
1ac9e428
YR
4307int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4308{
4309 u32 sel_phy_idx = 0;
4310 if (bp->link_params.num_phys <= 1)
4311 return INT_PHY;
4312
4313 if (bp->link_vars.link_up) {
4314 sel_phy_idx = EXT_PHY1;
4315 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4316 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4317 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4318 sel_phy_idx = EXT_PHY2;
4319 } else {
4320
4321 switch (bnx2x_phy_selection(&bp->link_params)) {
4322 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4323 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4324 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4325 sel_phy_idx = EXT_PHY1;
4326 break;
4327 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4328 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4329 sel_phy_idx = EXT_PHY2;
4330 break;
4331 }
4332 }
4333
4334 return sel_phy_idx;
4335
4336}
4337int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4338{
4339 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4340 /*
2de67439 4341	 * The selected activated PHY is always determined after swapping (in case PHY
1ac9e428
YR
4342 * swapping is enabled). So when swapping is enabled, we need to reverse
4343 * the configuration
4344 */
4345
4346 if (bp->link_params.multi_phy_config &
4347 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4348 if (sel_phy_idx == EXT_PHY1)
4349 sel_phy_idx = EXT_PHY2;
4350 else if (sel_phy_idx == EXT_PHY2)
4351 sel_phy_idx = EXT_PHY1;
4352 }
4353 return LINK_CONFIG_IDX(sel_phy_idx);
4354}
4355
55c11941 4356#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4357int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4358{
4359 struct bnx2x *bp = netdev_priv(dev);
4360 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4361
4362 switch (type) {
4363 case NETDEV_FCOE_WWNN:
4364 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4365 cp->fcoe_wwn_node_name_lo);
4366 break;
4367 case NETDEV_FCOE_WWPN:
4368 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4369 cp->fcoe_wwn_port_name_lo);
4370 break;
4371 default:
51c1a580 4372 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4373 return -EINVAL;
4374 }
4375
4376 return 0;
4377}
4378#endif
4379
9f6c9258
DK
4380/* called with rtnl_lock */
4381int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4382{
4383 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4384
4385 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4386 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4387 return -EAGAIN;
4388 }
4389
4390 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4391 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4392 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4393 return -EINVAL;
51c1a580 4394 }
9f6c9258
DK
4395
4396 /* This does not race with packet allocation
4397 * because the actual alloc size is
4398 * only updated as part of load
4399 */
4400 dev->mtu = new_mtu;
4401
66371c44
MM
4402 return bnx2x_reload_if_running(dev);
4403}
4404
c8f44aff 4405netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4406 netdev_features_t features)
66371c44
MM
4407{
4408 struct bnx2x *bp = netdev_priv(dev);
4409
4410 /* TPA requires Rx CSUM offloading */
621b4d66 4411 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4412 features &= ~NETIF_F_LRO;
621b4d66
DK
4413 features &= ~NETIF_F_GRO;
4414 }
66371c44
MM
4415
4416 return features;
4417}
4418
c8f44aff 4419int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4420{
4421 struct bnx2x *bp = netdev_priv(dev);
4422 u32 flags = bp->flags;
538dd2e3 4423 bool bnx2x_reload = false;
66371c44
MM
4424
4425 if (features & NETIF_F_LRO)
4426 flags |= TPA_ENABLE_FLAG;
4427 else
4428 flags &= ~TPA_ENABLE_FLAG;
4429
621b4d66
DK
4430 if (features & NETIF_F_GRO)
4431 flags |= GRO_ENABLE_FLAG;
4432 else
4433 flags &= ~GRO_ENABLE_FLAG;
4434
538dd2e3
MB
4435 if (features & NETIF_F_LOOPBACK) {
4436 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4437 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4438 bnx2x_reload = true;
4439 }
4440 } else {
4441 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4442 bp->link_params.loopback_mode = LOOPBACK_NONE;
4443 bnx2x_reload = true;
4444 }
4445 }
4446
66371c44
MM
4447 if (flags ^ bp->flags) {
4448 bp->flags = flags;
538dd2e3
MB
4449 bnx2x_reload = true;
4450 }
66371c44 4451
538dd2e3 4452 if (bnx2x_reload) {
66371c44
MM
4453 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4454 return bnx2x_reload_if_running(dev);
4455 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4456 }
4457
66371c44 4458 return 0;
9f6c9258
DK
4459}
4460
4461void bnx2x_tx_timeout(struct net_device *dev)
4462{
4463 struct bnx2x *bp = netdev_priv(dev);
4464
4465#ifdef BNX2X_STOP_ON_ERROR
4466 if (!bp->panic)
4467 bnx2x_panic();
4468#endif
7be08a72
AE
4469
4470 smp_mb__before_clear_bit();
4471 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4472 smp_mb__after_clear_bit();
4473
9f6c9258 4474	/* This allows the netif to be shut down gracefully before resetting */
7be08a72 4475 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4476}
4477
9f6c9258
DK
4478int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4479{
4480 struct net_device *dev = pci_get_drvdata(pdev);
4481 struct bnx2x *bp;
4482
4483 if (!dev) {
4484 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4485 return -ENODEV;
4486 }
4487 bp = netdev_priv(dev);
4488
4489 rtnl_lock();
4490
4491 pci_save_state(pdev);
4492
4493 if (!netif_running(dev)) {
4494 rtnl_unlock();
4495 return 0;
4496 }
4497
4498 netif_device_detach(dev);
4499
5d07d868 4500 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4501
4502 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4503
4504 rtnl_unlock();
4505
4506 return 0;
4507}
4508
4509int bnx2x_resume(struct pci_dev *pdev)
4510{
4511 struct net_device *dev = pci_get_drvdata(pdev);
4512 struct bnx2x *bp;
4513 int rc;
4514
4515 if (!dev) {
4516 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4517 return -ENODEV;
4518 }
4519 bp = netdev_priv(dev);
4520
4521 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4522 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4523 return -EAGAIN;
4524 }
4525
4526 rtnl_lock();
4527
4528 pci_restore_state(pdev);
4529
4530 if (!netif_running(dev)) {
4531 rtnl_unlock();
4532 return 0;
4533 }
4534
4535 bnx2x_set_power_state(bp, PCI_D0);
4536 netif_device_attach(dev);
4537
4538 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4539
4540 rtnl_unlock();
4541
4542 return rc;
4543}
619c5cb6
VZ
4544
4545
4546void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4547 u32 cid)
4548{
4549 /* ustorm cxt validation */
4550 cxt->ustorm_ag_context.cdu_usage =
4551 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4552 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4553 /* xcontext validation */
4554 cxt->xstorm_ag_context.cdu_reserved =
4555 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4556 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4557}
4558
1191cb83
ED
4559static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4560 u8 fw_sb_id, u8 sb_index,
4561 u8 ticks)
619c5cb6
VZ
4562{
4563
4564 u32 addr = BAR_CSTRORM_INTMEM +
4565 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4566 REG_WR8(bp, addr, ticks);
51c1a580
MS
4567 DP(NETIF_MSG_IFUP,
4568 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4569 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4570}
4571
1191cb83
ED
4572static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4573 u16 fw_sb_id, u8 sb_index,
4574 u8 disable)
619c5cb6
VZ
4575{
4576 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4577 u32 addr = BAR_CSTRORM_INTMEM +
4578 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4579 u16 flags = REG_RD16(bp, addr);
4580 /* clear and set */
4581 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4582 flags |= enable_flag;
4583 REG_WR16(bp, addr, flags);
51c1a580
MS
4584 DP(NETIF_MSG_IFUP,
4585 "port %x fw_sb_id %d sb_index %d disable %d\n",
4586 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4587}
4588
4589void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4590 u8 sb_index, u8 disable, u16 usec)
4591{
4592 int port = BP_PORT(bp);
4593 u8 ticks = usec / BNX2X_BTR;
4594
4595 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4596
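	/* a zero usec interval also disables coalescing for this index */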
4597 disable = disable ? 1 : (usec ? 0 : 1);
4598 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4599}