/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */


#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "bnx2x_cmn.h"

#ifdef BCM_VLAN
#include <linux/if_vlan.h>
#endif

static int bnx2x_poll(struct napi_struct *napi, int budget);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

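        /* nbd counts all BDs of the packet: the start BD (just unmapped,
         * hence the "- 1" below), the parse BD, one BD per fragment and,
         * for TSO, the split-header BD; the parse and split-header BDs
         * carry no mapping of their own.
         */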
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

                /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
                */
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped(). Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the
                 * queue while it's empty. This could have happened if
                 * rx_action() gets suspended in bnx2x_tx_int() after the
                 * condition before netif_tx_wake_queue(), while tx_action
                 * (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

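/* SGE pages are accounted for with a bitmask (fp->sge_mask): clearing a
 * bit marks the page as consumed by the hardware. The SGE producer may
 * only advance over mask elements that are fully cleared, which keeps
 * page recycling strictly in ring order.
 */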
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

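/* TPA (the HW LRO flavour) aggregates TCP packets into per-queue "bins".
 * tpa_start parks the first buffer of an aggregation in the pool and
 * replaces it on the ring; tpa_stop later assembles the full skb from
 * the linear part plus the SGE pages and passes it up the stack.
 */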
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* Set gso_size so the stack can re-segment the aggregated
           packet; this is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                           max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, DMA_FROM_DEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail... "
                                  "pad %d len %d rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_gro_receive(&fp->napi, bp->vlgrp,
                                                 le16_to_cpu(cqe->fast_path_cqe.
                                                             vlan_tag), skb);
                        else
#endif
                                napi_gro_receive(&fp->napi, skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
                                        struct sk_buff *skb)
{
        /* Set Toeplitz hash from CQE */
        if ((bp->dev->features & NETIF_F_RXHASH) &&
            (cqe->fast_path_cqe.status_flags &
             ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
                skb->rxhash =
                        le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
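        /* The last element on each RCQ page is a "next page" pointer,
         * so step over it when the consumer lands there.
         */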
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
           fp->index, hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                /* Prefetch the page containing the BD descriptor
                   at the producer's index; it will be needed when a new
                   skb is allocated */
                prefetch((void *)(PAGE_ALIGN((unsigned long)
                                             (&fp->rx_desc_ring[bd_prod])) -
                                  PAGE_SIZE + 1));

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
                   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        prefetch(skb);
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);

                                        /* Set Toeplitz hash for an LRO skb */
                                        bnx2x_set_skb_rxhash(bp, cqe, skb);

                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
                                                BNX2X_ERR("STOP on non-TCP "
                                                          "data\n");

                                        /* This is the size of the linear
                                           data on this skb */
                                        len = le16_to_cpu(cqe->fast_path_cqe.
                                                          len_on_bd);
                                        bnx2x_tpa_stop(bp, fp, queue, pad,
                                                    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
                                        if (bp->panic)
                                                return 0;
#endif

                                        bnx2x_update_sge_prod(fp,
                                                        &cqe->fast_path_cqe);
                                        goto next_cqe;
                                }
                        }

                        dma_sync_single_for_device(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
                                                   pad + RX_COPY_THRESH,
                                                   DMA_FROM_DEVICE);
                        prefetch(((char *)(skb)) + 128);

                        /* is this an error packet? */
                        if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR flags %x rx packet %u\n",
                                   cqe_fp_flags, sw_comp_cons);
                                fp->eth_q_stats.rx_err_discard_pkt++;
                                goto reuse_rx;
                        }

                        /* Since we don't have a jumbo ring,
                         * copy small packets if mtu > 1500
                         */
                        if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
                            (len <= RX_COPY_THRESH)) {
                                struct sk_buff *new_skb;

                                new_skb = netdev_alloc_skb(bp->dev,
                                                           len + pad);
                                if (new_skb == NULL) {
                                        DP(NETIF_MSG_RX_ERR,
                                           "ERROR packet dropped "
                                           "because of alloc failure\n");
                                        fp->eth_q_stats.rx_skb_alloc_failed++;
                                        goto reuse_rx;
                                }

                                /* aligned copy */
                                skb_copy_from_linear_data_offset(skb, pad,
                                                    new_skb->data + pad, len);
                                skb_reserve(new_skb, pad);
                                skb_put(new_skb, len);

                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

                                skb = new_skb;

                        } else
                        if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                        dma_unmap_addr(rx_buf, mapping),
                                                 bp->rx_buf_size,
                                                 DMA_FROM_DEVICE);
                                skb_reserve(skb, pad);
                                skb_put(skb, len);

                        } else {
                                DP(NETIF_MSG_RX_ERR,
                                   "ERROR packet dropped because "
                                   "of alloc failure\n");
                                fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
                                bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
                                goto next_rx;
                        }

                        skb->protocol = eth_type_trans(skb, bp->dev);

                        /* Set Toeplitz hash for a non-LRO skb */
                        bnx2x_set_skb_rxhash(bp, cqe, skb);

                        skb->ip_summed = CHECKSUM_NONE;
                        if (bp->rx_csum) {
                                if (likely(BNX2X_RX_CSUM_OK(cqe)))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        fp->eth_q_stats.hw_csum_err++;
                        }
                }

                skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
                if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
                    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                     PARSING_FLAGS_VLAN))
                        vlan_gro_receive(&fp->napi, bp->vlgrp,
                                le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
                else
#endif
                        napi_gro_receive(&fp->napi, skb);


next_rx:
                rx_buf->skb = NULL;

                bd_cons = NEXT_RX_IDX(bd_cons);
                bd_prod = NEXT_RX_IDX(bd_prod);
                bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
                rx_pkt++;
next_cqe:
                sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
                sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

                if (rx_pkt == budget)
                        break;
        } /* while */

        fp->rx_bd_cons = bd_cons;
        fp->rx_bd_prod = bd_prod_fw;
        fp->rx_comp_cons = sw_comp_cons;
        fp->rx_comp_prod = sw_comp_prod;

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
                             fp->rx_sge_prod);

        fp->rx_pkt += rx_pkt;
        fp->rx_calls++;

        return rx_pkt;
}

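/* Per-queue MSI-X handler: acknowledge the status block with interrupts
 * left disabled and defer all Rx/Tx work to the NAPI poller.
 */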
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
        struct bnx2x_fastpath *fp = fp_cookie;
        struct bnx2x *bp = fp->bp;

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

        DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
           fp->index, fp->sb_id);
        bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Handle Rx and Tx according to MSI-X vector */
        prefetch(fp->rx_cons_sb);
        prefetch(fp->tx_cons_sb);
        prefetch(&fp->status_blk->u_status_block.status_block_index);
        prefetch(&fp->status_blk->c_status_block.status_block_index);
        napi_schedule(&bnx2x_fp(bp, fp->index, napi));

        return IRQ_HANDLED;
}


/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
        mutex_lock(&bp->port.phy_mutex);

        if (bp->port.need_hw_lock)
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
        if (bp->port.need_hw_lock)
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

        mutex_unlock(&bp->port.phy_mutex);
}

void bnx2x_link_report(struct bnx2x *bp)
{
        if (bp->flags & MF_FUNC_DIS) {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC Link is Down\n");
                return;
        }

        if (bp->link_vars.link_up) {
                u16 line_speed;

                if (bp->state == BNX2X_STATE_OPEN)
                        netif_carrier_on(bp->dev);
                netdev_info(bp->dev, "NIC Link is Up, ");

                line_speed = bp->link_vars.line_speed;
                if (IS_E1HMF(bp)) {
                        u16 vn_max_rate;

                        vn_max_rate =
                                ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
                }
                pr_cont("%d Mbps ", line_speed);

                if (bp->link_vars.duplex == DUPLEX_FULL)
                        pr_cont("full duplex");
                else
                        pr_cont("half duplex");

                if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
                        if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->link_vars.flow_ctrl &
                                    BNX2X_FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        } else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");

        } else { /* link_down */
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC Link is Down\n");
        }
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                              ETH_MAX_AGGREGATION_QUEUES_E1H;
        u16 ring_prod, cqe_ring_prod;
        int i, j;

        bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
        DP(NETIF_MSG_IFUP,
           "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

        if (bp->flags & TPA_ENABLE_FLAG) {

                for_each_queue(bp, j) {
                        struct bnx2x_fastpath *fp = &bp->fp[j];

                        for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
                                   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
                                        BNX2X_ERR("Failed to allocate TPA "
                                                  "skb pool for queue[%d] - "
                                                  "disabling TPA on this "
                                                  "queue!\n", j);
                                        bnx2x_free_tpa_pool(bp, fp, i);
                                        fp->disable_tpa = 1;
                                        break;
                                }
                                dma_unmap_addr_set((struct sw_rx_bd *)
                                                        &bp->fp->tpa_pool[i],
                                                   mapping, 0);
                                fp->tpa_state[i] = BNX2X_TPA_STOP;
                        }
                }
        }

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                fp->rx_bd_cons = 0;
                fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
                fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

                /* "next page" elements initialization */
                /* SGE ring */
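                /* The last two element slots on each SGE page are reserved
                 * for the "next page" pointer, hence the "- 2" when indexing.
                 */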
                for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                        struct eth_rx_sge *sge;

                        sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
                        sge->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                        sge->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_sge_mapping +
                                        BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
                }

                bnx2x_init_sge_ring_bit_mask(fp);

                /* RX BD ring */
                for (i = 1; i <= NUM_RX_RINGS; i++) {
                        struct eth_rx_bd *rx_bd;

                        rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                        rx_bd->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                        rx_bd->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                            BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                }

                /* CQ ring */
                for (i = 1; i <= NUM_RCQ_RINGS; i++) {
                        struct eth_rx_cqe_next_page *nextpg;

                        nextpg = (struct eth_rx_cqe_next_page *)
                                &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
                        nextpg->addr_hi =
                                cpu_to_le32(U64_HI(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                        nextpg->addr_lo =
                                cpu_to_le32(U64_LO(fp->rx_comp_mapping +
                                           BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
                }

                /* Allocate SGEs and initialize the ring elements */
                for (i = 0, ring_prod = 0;
                     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

                        if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx sges\n", i);
                                BNX2X_ERR("disabling TPA for queue[%d]\n", j);
                                /* Cleanup already allocated elements */
                                bnx2x_free_rx_sge_range(bp, fp, ring_prod);
                                bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
                                fp->disable_tpa = 1;
                                ring_prod = 0;
                                break;
                        }
                        ring_prod = NEXT_SGE_IDX(ring_prod);
                }
                fp->rx_sge_prod = ring_prod;

                /* Allocate BDs and initialize BD ring */
                fp->rx_comp_cons = 0;
                cqe_ring_prod = ring_prod = 0;
                for (i = 0; i < bp->rx_ring_size; i++) {
                        if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
                                BNX2X_ERR("was only able to allocate "
                                          "%d rx skbs on queue[%d]\n", i, j);
                                fp->eth_q_stats.rx_skb_alloc_failed++;
                                break;
                        }
                        ring_prod = NEXT_RX_IDX(ring_prod);
                        cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
                        WARN_ON(ring_prod <= i);
                }

                fp->rx_bd_prod = ring_prod;
                /* must not have more available CQEs than BDs */
                fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
                                         cqe_ring_prod);
                fp->rx_pkt = fp->rx_calls = 0;

                /* Warning!
                 * This will generate an interrupt (to the TSTORM);
                 * it must only be done after the chip is initialized.
                 */
                bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
                                     fp->rx_sge_prod);
                if (j != 0)
                        continue;

                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
                       U64_LO(fp->rx_comp_mapping));
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
                       U64_HI(fp->rx_comp_mapping));
        }
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                u16 bd_cons = fp->tx_bd_cons;
                u16 sw_prod = fp->tx_pkt_prod;
                u16 sw_cons = fp->tx_pkt_cons;

                while (sw_cons != sw_prod) {
                        bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
                        sw_cons++;
                }
        }
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
        int i, j;

        for_each_queue(bp, j) {
                struct bnx2x_fastpath *fp = &bp->fp[j];

                for (i = 0; i < NUM_RX_BD; i++) {
                        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
                        struct sk_buff *skb = rx_buf->skb;

                        if (skb == NULL)
                                continue;

                        dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_size, DMA_FROM_DEVICE);

                        rx_buf->skb = NULL;
                        dev_kfree_skb(skb);
                }
                if (!fp->disable_tpa)
                        bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
                                            ETH_MAX_AGGREGATION_QUEUES_E1 :
                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
        }
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
        bnx2x_free_tx_skbs(bp);
        bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
        int i, offset = 1;

        free_irq(bp->msix_table[0].vector, bp->dev);
        DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
           bp->msix_table[0].vector);

#ifdef BCM_CNIC
        offset++;
#endif
        for_each_queue(bp, i) {
                DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
                   "state %x\n", i, bp->msix_table[i + offset].vector,
                   bnx2x_fp(bp, i, state));

                free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
        }
}

void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
        if (bp->flags & USING_MSIX_FLAG) {
                if (!disable_only)
                        bnx2x_free_msix_irqs(bp);
                pci_disable_msix(bp->pdev);
                bp->flags &= ~USING_MSIX_FLAG;

        } else if (bp->flags & USING_MSI_FLAG) {
                if (!disable_only)
                        free_irq(bp->pdev->irq, bp->dev);
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;

        } else if (!disable_only)
                free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
        int i, rc, offset = 1;
        int igu_vec = 0;

        bp->msix_table[0].entry = igu_vec;
        DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
        igu_vec = BP_L_ID(bp) + offset;
        bp->msix_table[1].entry = igu_vec;
        DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
        offset++;
#endif
        for_each_queue(bp, i) {
                igu_vec = BP_L_ID(bp) + offset + i;
                bp->msix_table[i + offset].entry = igu_vec;
                DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
                   "(fastpath #%u)\n", i + offset, igu_vec, i);
        }

        rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
                             BNX2X_NUM_QUEUES(bp) + offset);

        /*
         * reconfigure number of tx/rx queues according to available
         * MSI-X vectors
         */
        if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
                /* vectors available for FP */
                int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

                DP(NETIF_MSG_IFUP,
                   "Trying to use less MSI-X vectors: %d\n", rc);

                rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

                if (rc) {
                        DP(NETIF_MSG_IFUP,
                           "MSI-X is not attainable rc %d\n", rc);
                        return rc;
                }

                bp->num_queues = min(bp->num_queues, fp_vec);

                DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
                   bp->num_queues);
        } else if (rc) {
                DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
                return rc;
        }

        bp->flags |= USING_MSIX_FLAG;

        return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
        int i, rc, offset = 1;

        rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
                         bp->dev->name, bp->dev);
        if (rc) {
                BNX2X_ERR("request sp irq failed\n");
                return -EBUSY;
        }

#ifdef BCM_CNIC
        offset++;
#endif
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                         bp->dev->name, i);

                rc = request_irq(bp->msix_table[i + offset].vector,
                                 bnx2x_msix_fp_int, 0, fp->name, fp);
                if (rc) {
                        BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
                        bnx2x_free_msix_irqs(bp);
                        return -EBUSY;
                }

                fp->state = BNX2X_FP_STATE_IRQ;
        }

        i = BNX2X_NUM_QUEUES(bp);
        netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
                    " ... fp[%d] %d\n",
                    bp->msix_table[0].vector,
                    0, bp->msix_table[offset].vector,
                    i - 1, bp->msix_table[offset + i - 1].vector);

        return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
        int rc;

        rc = pci_enable_msi(bp->pdev);
        if (rc) {
                DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
                return -1;
        }
        bp->flags |= USING_MSI_FLAG;

        return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
        unsigned long flags;
        int rc;

        if (bp->flags & USING_MSI_FLAG)
                flags = 0;
        else
                flags = IRQF_SHARED;

        rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
                         bp->dev->name, bp->dev);
        if (!rc)
                bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

        return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i)
                napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i)
                napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
        int intr_sem;

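        /* intr_sem is raised while interrupts are being reconfigured;
         * only re-enable NAPI and HW interrupts once the count drops
         * back to zero.
         */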
        intr_sem = atomic_dec_and_test(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (intr_sem) {
                if (netif_running(bp->dev)) {
                        bnx2x_napi_enable(bp);
                        bnx2x_int_enable(bp);
                        if (bp->state == BNX2X_STATE_OPEN)
                                netif_tx_wake_all_queues(bp->dev);
                }
        }
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
        bnx2x_int_disable_sync(bp, disable_hw);
        bnx2x_napi_disable(bp);
        netif_tx_disable(bp->dev);
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
        int rc = 0;

        switch (bp->int_mode) {
        case INT_MODE_INTx:
        case INT_MODE_MSI:
                bp->num_queues = 1;
                DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
                break;
        default:
                /* Set number of queues according to bp->multi_mode value */
                bnx2x_set_num_queues_msix(bp);

                DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
                   bp->num_queues);

                /* if we can't use MSI-X we only need one fp,
                 * so try to enable MSI-X with the requested number of fp's
                 * and fall back to MSI or legacy INTx with one fp
                 */
                rc = bnx2x_enable_msix(bp);
                if (rc)
                        /* failed to enable MSI-X */
                        bp->num_queues = 1;
                break;
        }
        bp->dev->real_num_tx_queues = bp->num_queues;
        return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
        u32 load_code;
        int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EPERM;
#endif

        bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

        rc = bnx2x_set_num_queues(bp);

        if (bnx2x_alloc_mem(bp)) {
                bnx2x_free_irq(bp, true);
                return -ENOMEM;
        }

        for_each_queue(bp, i)
                bnx2x_fp(bp, i, disable_tpa) =
                        ((bp->flags & TPA_ENABLE_FLAG) == 0);

        for_each_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, 128);

        bnx2x_napi_enable(bp);

        if (bp->flags & USING_MSIX_FLAG) {
                rc = bnx2x_req_msix_irqs(bp);
                if (rc) {
                        bnx2x_free_irq(bp, true);
                        goto load_error1;
                }
        } else {
                /* Fall back to INTx if we failed to enable MSI-X due to
                   lack of memory (in bnx2x_set_num_queues()) */
                if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
                        bnx2x_enable_msi(bp);
                bnx2x_ack_int(bp);
                rc = bnx2x_req_irq(bp);
                if (rc) {
                        BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
                        bnx2x_free_irq(bp, true);
                        goto load_error1;
                }
                if (bp->flags & USING_MSI_FLAG) {
                        bp->dev->irq = bp->pdev->irq;
                        netdev_info(bp->dev, "using MSI IRQ %d\n",
                                    bp->pdev->irq);
                }
        }

        /* Send LOAD_REQUEST command to the MCP.
           Returns the type of LOAD command: if it is the first port
           to be initialized, common blocks should be initialized,
           otherwise not.
        */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EBUSY;
                        goto load_error2;
                }
                if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
                        rc = -EBUSY; /* other port in diagnostic mode */
                        goto load_error2;
                }

        } else {
                int port = BP_PORT(bp);

                DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                load_count[0]++;
                load_count[1 + port]++;
                DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
                   load_count[0], load_count[1], load_count[2]);
                if (load_count[0] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
                else if (load_count[1 + port] == 1)
                        load_code = FW_MSG_CODE_DRV_LOAD_PORT;
                else
                        load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
        }

        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
            (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
                bp->port.pmf = 1;
        else
                bp->port.pmf = 0;
        DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

        /* Initialize HW */
        rc = bnx2x_init_hw(bp, load_code);
        if (rc) {
                BNX2X_ERR("HW init failed, aborting\n");
                bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
                goto load_error2;
        }

        /* Setup NIC internals and enable interrupts */
        bnx2x_nic_init(bp, load_code);

        if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
            (bp->common.shmem2_base))
                SHMEM2_WR(bp, dcc_support,
                          (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
                           SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

        /* Send LOAD_DONE command to MCP */
        if (!BP_NOMCP(bp)) {
                load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
                if (!load_code) {
                        BNX2X_ERR("MCP response failure, aborting\n");
                        rc = -EBUSY;
                        goto load_error3;
                }
        }

        bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

        rc = bnx2x_setup_leading(bp);
        if (rc) {
                BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
                goto load_error3;
#else
                bp->panic = 1;
                return -EBUSY;
#endif
        }

        if (CHIP_IS_E1H(bp))
                if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
                        DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
                        bp->flags |= MF_FUNC_DIS;
                }

        if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
                /* Enable Timer scan */
                REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
                for_each_nondefault_queue(bp, i) {
                        rc = bnx2x_setup_multi(bp, i);
                        if (rc)
#ifdef BCM_CNIC
                                goto load_error4;
#else
                                goto load_error3;
#endif
                }

                if (CHIP_IS_E1(bp))
                        bnx2x_set_eth_mac_addr_e1(bp, 1);
                else
                        bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
                /* Set iSCSI L2 MAC */
                mutex_lock(&bp->cnic_mutex);
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
                        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
                        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
                        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
                                      CNIC_SB_ID(bp));
                }
                mutex_unlock(&bp->cnic_mutex);
#endif
        }

        if (bp->port.pmf)
                bnx2x_initial_phy_init(bp, load_mode);

        /* Start fast path */
        switch (load_mode) {
        case LOAD_NORMAL:
                if (bp->state == BNX2X_STATE_OPEN) {
                        /* Tx queues should only be re-enabled */
                        netif_tx_wake_all_queues(bp->dev);
                }
                /* Initialize the receive filter. */
                bnx2x_set_rx_mode(bp->dev);
                break;

        case LOAD_OPEN:
                netif_tx_start_all_queues(bp->dev);
                if (bp->state != BNX2X_STATE_OPEN)
                        netif_tx_disable(bp->dev);
                /* Initialize the receive filter. */
                bnx2x_set_rx_mode(bp->dev);
                break;

        case LOAD_DIAG:
                /* Initialize the receive filter. */
                bnx2x_set_rx_mode(bp->dev);
                bp->state = BNX2X_STATE_DIAG;
                break;

        default:
                break;
        }

        if (!bp->port.pmf)
                bnx2x__link_status_update(bp);

        /* start the timer */
        mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
        bnx2x_setup_cnic_irq_info(bp);
        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
        bnx2x_inc_load_cnt(bp);

        return 0;

#ifdef BCM_CNIC
load_error4:
        /* Disable Timer scan */
        REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
        bnx2x_int_disable_sync(bp, 1);
        if (!BP_NOMCP(bp)) {
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
        }
        bp->port.pmf = 0;
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
        /* Release IRQs */
        bnx2x_free_irq(bp, false);
load_error1:
        bnx2x_napi_disable(bp);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
        int i;

        if (bp->state == BNX2X_STATE_CLOSED) {
                /* Interface has been removed - nothing to recover */
                bp->recovery_state = BNX2X_RECOVERY_DONE;
                bp->is_leader = 0;
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
                smp_wmb();

                return -EINVAL;
        }

#ifdef BCM_CNIC
        bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

        /* Set "drop all" */
        bp->rx_mode = BNX2X_RX_MODE_NONE;
        bnx2x_set_storm_rx_mode(bp);

        /* Disable HW interrupts, NAPI and Tx */
        bnx2x_netif_stop(bp, 1);
        netif_carrier_off(bp->dev);

        del_timer_sync(&bp->timer);
        SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
                 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        /* Cleanup the chip if needed */
        if (unload_mode != UNLOAD_RECOVERY)
                bnx2x_chip_cleanup(bp, unload_mode);

        bp->port.pmf = 0;

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        /* The last driver must disable the "close the gate" functionality
         * if there is no parity attention or "process kill" pending.
         */
        if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
            bnx2x_reset_is_done(bp))
                bnx2x_disable_close_the_gate(bp);

        /* Reset MCP mailbox sequence if there is ongoing recovery */
        if (unload_mode == UNLOAD_RECOVERY)
                bp->fw_seq = 0;

        return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0:
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                                       PCI_PM_CTRL_PME_STATUS));

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);
                break;

        case PCI_D3hot:
                /* If there are other clients above, don't
                   shut down the power */
                if (atomic_read(&bp->pdev->enable_cnt) != 1)
                        return 0;
                /* Don't shut down the power for emulation and FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        return 0;

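                /* Program D3hot (power-state value 3) into the PM control
                 * register's state field.
                 */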
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= 3;

                if (bp->wol)
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                break;

        default:
                return -EINVAL;
        }
        return 0;
}


/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
                                                 napi);
        struct bnx2x *bp = fp->bp;

        while (1) {
#ifdef BNX2X_STOP_ON_ERROR
                if (unlikely(bp->panic)) {
                        napi_complete(napi);
                        return 0;
                }
#endif

                if (bnx2x_has_tx_work(fp))
                        bnx2x_tx_int(fp);

                if (bnx2x_has_rx_work(fp)) {
                        work_done += bnx2x_rx_int(fp, budget - work_done);

                        /* must not complete if we consumed full budget */
                        if (work_done >= budget)
                                break;
                }

                /* Fall out from the NAPI loop if needed */
                if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                        bnx2x_update_fpsb_idx(fp);
                        /* bnx2x_has_rx_work() reads the status block, thus
                         * we need to ensure that status block indices have
                         * been actually read (bnx2x_update_fpsb_idx) prior
                         * to this check (bnx2x_has_rx_work) so that we won't
                         * write the "newer" value of the status block to IGU
                         * (if there was a DMA right after bnx2x_has_rx_work
                         * and if there is no rmb, the memory reading
                         * (bnx2x_update_fpsb_idx) may be postponed to right
                         * before bnx2x_ack_sb). In this case there will never
                         * be another interrupt until there is another update
                         * of the status block, while there is still unhandled
                         * work.
                         */
                        rmb();

                        if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
                                napi_complete(napi);
                                /* Re-enable interrupts */
                                bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
                                             le16_to_cpu(fp->fp_c_idx),
                                             IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
                                             le16_to_cpu(fp->fp_u_idx),
                                             IGU_INT_ENABLE, 1);
                                break;
                        }
                }
        }

        return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fastpath *fp,
                                   struct sw_tx_bd *tx_buf,
                                   struct eth_tx_start_bd **tx_bd, u16 hlen,
                                   u16 bd_prod, int nbd)
{
        struct eth_tx_start_bd *h_tx_bd = *tx_bd;
        struct eth_tx_bd *d_tx_bd;
        dma_addr_t mapping;
        int old_len = le16_to_cpu(h_tx_bd->nbytes);

        /* first fix first BD */
        h_tx_bd->nbd = cpu_to_le16(nbd);
        h_tx_bd->nbytes = cpu_to_le16(hlen);

        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
           "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
           h_tx_bd->addr_lo, h_tx_bd->nbd);

        /* now get a new data BD
         * (after the pbd) and fill it */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
        d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

        mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
                           le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

        d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

        /* this marks the BD as one that has no individual mapping */
        tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

        DP(NETIF_MSG_TX_QUEUED,
           "TSO split data size is %d (%x:%x)\n",
           d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

        /* update tx_bd */
        *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

        return bd_prod;
}

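/* Adjust a checksum that was computed over a region shifted by "fix"
 * bytes relative to the transport header: subtract the extra leading
 * bytes (fix > 0) or add the missing ones (fix < 0), then return the
 * folded result byte-swapped.
 */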
1686 | static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) | |
1687 | { | |
1688 | if (fix > 0) | |
1689 | csum = (u16) ~csum_fold(csum_sub(csum, | |
1690 | csum_partial(t_header - fix, fix, 0))); | |
1691 | ||
1692 | else if (fix < 0) | |
1693 | csum = (u16) ~csum_fold(csum_add(csum, | |
1694 | csum_partial(t_header, -fix, 0))); | |
1695 | ||
1696 | return swab16(csum); | |
1697 | } | |
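
/* A sketch of the arithmetic above (no extra logic): the HW checksum in
 * the skb was computed starting "fix" bytes away from the transport
 * header.  A positive fix means the sum covers fix extra bytes before
 * the header, so their partial sum is subtracted; a negative fix means
 * fix bytes were missed, so they are added back.  When an adjustment
 * was needed the result is re-folded and complemented; either way it is
 * byte-swapped into the format the parsing BD expects.
 */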

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
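
/* For example: an IPv4 TCP skb with CHECKSUM_PARTIAL and SKB_GSO_TCPV4
 * set yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a
 * non-offloaded ARP frame simply yields XMIT_PLAIN.
 */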

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (i.e. it is too
 * fragmented).  There is no need to check fragmentation if the page
 * size is above 8KB - the FW restrictions cannot be violated then.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
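
/* A worked example of the window check above, with hypothetical numbers:
 * if the HW could fetch at most 13 BDs per packet, wnd_size would be 10,
 * and the FW would require every 10 consecutive data BDs to cover at
 * least lso_mss bytes between them.  The loop slides that window across
 * the frag list and requests linearization as soon as any window sums to
 * less than lso_mss.
 */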

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions).  No need to check fragmentation if the page size
	   is above 8KB - the FW restrictions cannot be violated then. */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or checksum offload),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/
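
	/* The resulting chain for one packet thus looks like (a sketch;
	 * the frag count varies):
	 *
	 *   start BD -> parse BD -> data BD (frag 0) -> ... -> last data BD
	 *
	 * plus, when the chain crosses a ring page boundary, one next-page
	 * BD that the HW consumes in between.
	 */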

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
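		/* no HW VLAN tag - the field is otherwise unused, so stash
		 * the packet producer index there (presumably an aid when
		 * correlating completions in debug output) */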
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
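		/* the parsing BD counts in 16-bit words (hence the /2
		 * above); from here on hlen is needed in bytes again */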
		hlen = hlen * 2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

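		/* headers don't fill the whole first BD: split it, which
		 * chains in one extra data BD - hence the ++nbd */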
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next-page BD
	 * if this packet's chain contains or ends on one
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

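	/* the first frag BD doubles as the carrier of the total packet
	 * size (presumably for FW accounting of the whole packet) */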
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
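	/* compiler barrier: the doorbell below reads tx_db.raw, which (by
	 * all appearances) overlays the tx_db.data.prod written above, so
	 * the compiler must not reorder or cache across this point */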
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}