Commit | Line | Data |
---|---|---|
9f6c9258 DK |
1 | /* bnx2x_cmn.h: Broadcom Everest network driver. |
2 | * | |
3 | * Copyright (c) 2007-2010 Broadcom Corporation | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation. | |
8 | * | |
9 | * Maintained by: Eilon Greenstein <eilong@broadcom.com> | |
10 | * Written by: Eliezer Tamir | |
11 | * Based on code from Michael Chan's bnx2 driver | |
12 | * UDP CSUM errata workaround by Arik Gendelman | |
13 | * Slowpath and fastpath rework by Vladislav Zolotarov | |
14 | * Statistics and Link management by Yitchak Gertner | |
15 | * | |
16 | */ | |
17 | #ifndef BNX2X_CMN_H | |
18 | #define BNX2X_CMN_H | |
19 | ||
20 | #include <linux/types.h> | |
21 | #include <linux/netdevice.h> | |
22 | ||
23 | ||
24 | #include "bnx2x.h" | |
25 | ||
26 | ||
27 | /*********************** Interfaces **************************** | |
28 | * Functions that need to be implemented by each driver version | |
29 | */ | |
30 | ||
31 | /** | |
32 | * Initialize link parameters structure variables. | |
33 | * | |
34 | * @param bp | |
35 | * @param load_mode | |
36 | * | |
37 | * @return u8 | |
38 | */ | |
39 | u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); | |
40 | ||
41 | /** | |
42 | * Configure hw according to link parameters structure. | |
43 | * | |
44 | * @param bp | |
45 | */ | |
46 | void bnx2x_link_set(struct bnx2x *bp); | |
47 | ||
48 | /** | |
49 | * Query link status | |
50 | * | |
51 | * @param bp | |
52 | * | |
53 | * @return 0 - link is UP | |
54 | */ | |
55 | u8 bnx2x_link_test(struct bnx2x *bp); | |
56 | ||
57 | /** | |
58 | * Handles link status change | |
59 | * | |
60 | * @param bp | |
61 | */ | |
62 | void bnx2x__link_status_update(struct bnx2x *bp); | |
63 | ||
64 | /** | |
65 | * MSI-X slowpath interrupt handler | |
66 | * | |
67 | * @param irq | |
68 | * @param dev_instance | |
69 | * | |
70 | * @return irqreturn_t | |
71 | */ | |
72 | irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance); | |
73 | ||
74 | /** | |
75 | * non MSI-X interrupt handler | |
76 | * | |
77 | * @param irq | |
78 | * @param dev_instance | |
79 | * | |
80 | * @return irqreturn_t | |
81 | */ | |
82 | irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); | |
83 | #ifdef BCM_CNIC | |
84 | ||
85 | /** | |
86 | * Send command to cnic driver | |
87 | * | |
88 | * @param bp | |
89 | * @param cmd | |
90 | */ | |
91 | int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); | |
92 | ||
93 | /** | |
94 | * Provides cnic information for proper interrupt handling | |
95 | * | |
96 | * @param bp | |
97 | */ | |
98 | void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); | |
99 | #endif | |
100 | ||
101 | /** | |
102 | * Enable HW interrupts. | |
103 | * | |
104 | * @param bp | |
105 | */ | |
106 | void bnx2x_int_enable(struct bnx2x *bp); | |
107 | ||
108 | /** | |
109 | * Disable interrupts. This function ensures that there are no | |
110 | * ISRs or SP DPCs (sp_task) running after it returns. | |
111 | * | |
112 | * @param bp | |
113 | * @param disable_hw if true, disable HW interrupts. | |
114 | */ | |
115 | void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); | |
116 | ||
6891dd25 DK |
117 | /** |
118 | * Loads device firmware | |
119 | * | |
120 | * @param bp | |
121 | * | |
122 | * @return int | |
123 | */ | |
124 | int bnx2x_init_firmware(struct bnx2x *bp); | |
125 | ||
9f6c9258 DK |
126 | /** |
127 | * Init HW blocks according to current initialization stage: | |
128 | * COMMON, PORT or FUNCTION. | |
129 | * | |
130 | * @param bp | |
131 | * @param load_code: COMMON, PORT or FUNCTION | |
132 | * | |
133 | * @return int | |
134 | */ | |
135 | int bnx2x_init_hw(struct bnx2x *bp, u32 load_code); | |
136 | ||
137 | /** | |
138 | * Init driver internals: | |
139 | * - rings | |
140 | * - status blocks | |
141 | * - etc. | |
142 | * | |
143 | * @param bp | |
144 | * @param load_code COMMON, PORT or FUNCTION | |
145 | */ | |
146 | void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); | |
147 | ||
148 | /** | |
149 | * Allocate driver's memory. | |
150 | * | |
151 | * @param bp | |
152 | * | |
153 | * @return int | |
154 | */ | |
155 | int bnx2x_alloc_mem(struct bnx2x *bp); | |
156 | ||
157 | /** | |
158 | * Release driver's memory. | |
159 | * | |
160 | * @param bp | |
161 | */ | |
162 | void bnx2x_free_mem(struct bnx2x *bp); | |
163 | ||
164 | /** | |
165 | * Bring up a leading (the first) eth Client. | |
166 | * | |
167 | * @param bp | |
168 | * | |
169 | * @return int | |
170 | */ | |
171 | int bnx2x_setup_leading(struct bnx2x *bp); | |
172 | ||
173 | /** | |
174 | * Setup non-leading eth Client. | |
175 | * | |
176 | * @param bp | |
177 | * @param fp | |
178 | * | |
179 | * @return int | |
180 | */ | |
181 | int bnx2x_setup_multi(struct bnx2x *bp, int index); | |
182 | ||
183 | /** | |
184 | * Set number of queues according to mode and number of available | |
185 | * msi-x vectors | |
186 | * | |
187 | * @param bp | |
188 | * | |
189 | */ | |
190 | void bnx2x_set_num_queues_msix(struct bnx2x *bp); | |
191 | ||
192 | /** | |
193 | * Cleanup chip internals: | |
194 | * - Cleanup MAC configuration. | |
195 | * - Close clients. | |
196 | * - etc. | |
197 | * | |
198 | * @param bp | |
199 | * @param unload_mode | |
200 | */ | |
201 | void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); | |
202 | ||
203 | /** | |
204 | * Acquire HW lock. | |
205 | * | |
206 | * @param bp | |
207 | * @param resource Resource bit which was locked | |
208 | * | |
209 | * @return int | |
210 | */ | |
211 | int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource); | |
212 | ||
213 | /** | |
214 | * Release HW lock. | |
215 | * | |
216 | * @param bp driver handle | |
217 | * @param resource Resource bit which was locked | |
218 | * | |
219 | * @return int | |
220 | */ | |
221 | int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource); | |
222 | ||
223 | /** | |
224 | * Configure eth MAC address in the HW according to the value in | |
225 | * netdev->dev_addr for 57711 | |
226 | * | |
227 | * @param bp driver handle | |
228 | * @param set | |
229 | */ | |
230 | void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); | |
231 | ||
232 | /** | |
233 | * Configure eth MAC address in the HW according to the value in | |
234 | * netdev->dev_addr for 57710 | |
235 | * | |
236 | * @param bp driver handle | |
237 | * @param set | |
238 | */ | |
239 | void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set); | |
240 | ||
241 | #ifdef BCM_CNIC | |
242 | /** | |
243 | * Set iSCSI MAC(s) at the next entries in the CAM after the ETH | |
244 | * MAC(s). The function will wait until the ramrod completion | |
245 | * returns. | |
246 | * | |
247 | * @param bp driver handle | |
248 | * @param set set or clear the CAM entry | |
249 | * | |
250 | * @return 0 if success, -ENODEV if ramrod doesn't return. | |
251 | */ | |
252 | int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set); | |
253 | #endif | |
254 | ||
255 | /** | |
256 | * Initialize status block in FW and HW | |
257 | * | |
258 | * @param bp driver handle | |
259 | * @param sb host_status_block | |
260 | * @param dma_addr_t mapping | |
261 | * @param int sb_id | |
262 | */ | |
263 | void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, | |
264 | dma_addr_t mapping, int sb_id); | |
265 | ||
266 | /** | |
267 | * Reconfigure FW/HW according to dev->flags rx mode | |
268 | * | |
269 | * @param dev net_device | |
270 | * | |
271 | */ | |
272 | void bnx2x_set_rx_mode(struct net_device *dev); | |
273 | ||
274 | /** | |
275 | * Configure MAC filtering rules in a FW. | |
276 | * | |
277 | * @param bp driver handle | |
278 | */ | |
279 | void bnx2x_set_storm_rx_mode(struct bnx2x *bp); | |
280 | ||
281 | /* Parity errors related */ | |
282 | void bnx2x_inc_load_cnt(struct bnx2x *bp); | |
283 | u32 bnx2x_dec_load_cnt(struct bnx2x *bp); | |
284 | bool bnx2x_chk_parity_attn(struct bnx2x *bp); | |
285 | bool bnx2x_reset_is_done(struct bnx2x *bp); | |
286 | void bnx2x_disable_close_the_gate(struct bnx2x *bp); | |
287 | ||
288 | /** | |
289 | * Perform statistics handling according to event | |
290 | * | |
291 | * @param bp driver handle | |
292 | * @param event bnx2x_stats_event | |
293 | */ | |
294 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); | |
295 | ||
296 | /** | |
297 | * Configures FW with client parameters (like HW VLAN removal) | |
298 | * for each active client. | |
299 | * | |
300 | * @param bp | |
301 | */ | |
302 | void bnx2x_set_client_config(struct bnx2x *bp); | |
303 | ||
304 | /** | |
305 | * Handle sp events | |
306 | * | |
307 | * @param fp fastpath handle for the event | |
308 | * @param rr_cqe eth_rx_cqe | |
309 | */ | |
310 | void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); | |
311 | ||
312 | ||
/**
 * Re-read the latest per-storm indices from the fastpath status block.
 *
 * @param fp	fastpath handle
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	/* Compiler barrier first: the status block is DMA-written by the
	 * chip, so force a fresh read rather than a cached value. */
	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
321 | ||
/**
 * Publish new RX BD/CQE/SGE producer values to the chip (USTORM memory).
 *
 * @param bp		driver handle
 * @param fp		fastpath handle
 * @param bd_prod	new RX BD producer
 * @param rx_comp_prod	new RX completion-queue producer
 * @param rx_sge_prod	new RX SGE producer
 *
 * NOTE(review): the wmb()/mmiowb() ordering here is load-bearing; the
 * statement order must not be changed.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* The producers structure is written to the chip one u32 at a time */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
356 | ||
357 | ||
358 | ||
/**
 * Acknowledge a status block to the IGU via the HC command register.
 *
 * @param bp	driver handle
 * @param sb_id	status block id
 * @param storm	storm id the index belongs to
 * @param index	status block index being acknowledged
 * @param op	interrupt mode operation
 * @param update whether to update the SB index in the IGU
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	/* Build the ack register image, then write it as one u32 */
	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
381 | static inline u16 bnx2x_ack_int(struct bnx2x *bp) | |
382 | { | |
383 | u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + | |
384 | COMMAND_REG_SIMD_MASK); | |
385 | u32 result = REG_RD(bp, hc_addr); | |
386 | ||
387 | DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", | |
388 | result, hc_addr); | |
389 | ||
390 | return result; | |
391 | } | |
392 | ||
393 | /* | |
394 | * fast path service functions | |
395 | */ | |
396 | ||
397 | static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) | |
398 | { | |
399 | /* Tell compiler that consumer and producer can change */ | |
400 | barrier(); | |
401 | return (fp->tx_pkt_prod != fp->tx_pkt_cons); | |
402 | } | |
403 | ||
404 | static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | |
405 | { | |
406 | s16 used; | |
407 | u16 prod; | |
408 | u16 cons; | |
409 | ||
410 | prod = fp->tx_bd_prod; | |
411 | cons = fp->tx_bd_cons; | |
412 | ||
413 | /* NUM_TX_RINGS = number of "next-page" entries | |
414 | It will be used as a threshold */ | |
415 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | |
416 | ||
417 | #ifdef BNX2X_STOP_ON_ERROR | |
418 | WARN_ON(used < 0); | |
419 | WARN_ON(used > fp->bp->tx_ring_size); | |
420 | WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); | |
421 | #endif | |
422 | ||
423 | return (s16)(fp->bp->tx_ring_size) - used; | |
424 | } | |
425 | ||
426 | static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) | |
427 | { | |
428 | u16 hw_cons; | |
429 | ||
430 | /* Tell compiler that status block fields can change */ | |
431 | barrier(); | |
432 | hw_cons = le16_to_cpu(*fp->tx_cons_sb); | |
433 | return hw_cons != fp->tx_pkt_cons; | |
434 | } | |
435 | ||
436 | static inline void bnx2x_free_rx_sge(struct bnx2x *bp, | |
437 | struct bnx2x_fastpath *fp, u16 index) | |
438 | { | |
439 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | |
440 | struct page *page = sw_buf->page; | |
441 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | |
442 | ||
443 | /* Skip "next page" elements */ | |
444 | if (!page) | |
445 | return; | |
446 | ||
447 | dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), | |
448 | SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); | |
449 | __free_pages(page, PAGES_PER_SGE_SHIFT); | |
450 | ||
451 | sw_buf->page = NULL; | |
452 | sge->addr_hi = 0; | |
453 | sge->addr_lo = 0; | |
454 | } | |
455 | ||
/**
 * Free the first @last SGE entries of the ring.
 *
 * @param bp	driver handle
 * @param fp	fastpath handle
 * @param last	number of entries to free (exclusive upper bound)
 */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
464 | ||
465 | static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | |
466 | struct bnx2x_fastpath *fp, u16 index) | |
467 | { | |
468 | struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT); | |
469 | struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; | |
470 | struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; | |
471 | dma_addr_t mapping; | |
472 | ||
473 | if (unlikely(page == NULL)) | |
474 | return -ENOMEM; | |
475 | ||
476 | mapping = dma_map_page(&bp->pdev->dev, page, 0, | |
477 | SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); | |
478 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | |
479 | __free_pages(page, PAGES_PER_SGE_SHIFT); | |
480 | return -ENOMEM; | |
481 | } | |
482 | ||
483 | sw_buf->page = page; | |
484 | dma_unmap_addr_set(sw_buf, mapping, mapping); | |
485 | ||
486 | sge->addr_hi = cpu_to_le32(U64_HI(mapping)); | |
487 | sge->addr_lo = cpu_to_le32(U64_LO(mapping)); | |
488 | ||
489 | return 0; | |
490 | } | |
491 | static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |
492 | struct bnx2x_fastpath *fp, u16 index) | |
493 | { | |
494 | struct sk_buff *skb; | |
495 | struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index]; | |
496 | struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; | |
497 | dma_addr_t mapping; | |
498 | ||
499 | skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); | |
500 | if (unlikely(skb == NULL)) | |
501 | return -ENOMEM; | |
502 | ||
503 | mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, | |
504 | DMA_FROM_DEVICE); | |
505 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { | |
506 | dev_kfree_skb(skb); | |
507 | return -ENOMEM; | |
508 | } | |
509 | ||
510 | rx_buf->skb = skb; | |
511 | dma_unmap_addr_set(rx_buf, mapping, mapping); | |
512 | ||
513 | rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); | |
514 | rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); | |
515 | ||
516 | return 0; | |
517 | } | |
518 | ||
/**
 * Move an skb from the consumer slot to the producer slot of the RX ring.
 *
 * Note that we are not allocating a new skb - we are just moving one from
 * cons to prod. We are not creating a new mapping, so there is no need to
 * check for dma_mapping_error().
 *
 * @param fp	fastpath handle
 * @param skb	the skb being recycled (unused here; kept for the callers)
 * @param cons	consumer index the skb currently occupies
 * @param prod	producer index the skb is moved to
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* Hand the (possibly CPU-touched) head of the buffer back to the
	 * device before the descriptor is reposted */
	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
542 | ||
543 | static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) | |
544 | { | |
545 | int i, j; | |
546 | ||
547 | for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { | |
548 | int idx = RX_SGE_CNT * i - 1; | |
549 | ||
550 | for (j = 0; j < 2; j++) { | |
551 | SGE_MASK_CLEAR_BIT(fp, idx); | |
552 | idx--; | |
553 | } | |
554 | } | |
555 | } | |
556 | ||
557 | static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp) | |
558 | { | |
559 | /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */ | |
560 | memset(fp->sge_mask, 0xff, | |
561 | (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); | |
562 | ||
563 | /* Clear the two last indices in the page to 1: | |
564 | these are the indices that correspond to the "next" element, | |
565 | hence will never be indicated and should be removed from | |
566 | the calculations. */ | |
567 | bnx2x_clear_sge_mask_next_elems(fp); | |
568 | } | |
/**
 * Free the skbs parked in the first @last TPA aggregation bins.
 *
 * @param bp	driver handle
 * @param fp	fastpath handle
 * @param last	number of TPA bins to scan
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		/* Empty bins are expected on a clean stop - just note them */
		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		/* Only bins in the START state still hold a live DMA
		 * mapping; unmap before freeing the skb */
		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
592 | ||
593 | ||
594 | static inline void bnx2x_init_tx_ring(struct bnx2x *bp) | |
595 | { | |
596 | int i, j; | |
597 | ||
598 | for_each_queue(bp, j) { | |
599 | struct bnx2x_fastpath *fp = &bp->fp[j]; | |
600 | ||
601 | for (i = 1; i <= NUM_TX_RINGS; i++) { | |
602 | struct eth_tx_next_bd *tx_next_bd = | |
603 | &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd; | |
604 | ||
605 | tx_next_bd->addr_hi = | |
606 | cpu_to_le32(U64_HI(fp->tx_desc_mapping + | |
607 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | |
608 | tx_next_bd->addr_lo = | |
609 | cpu_to_le32(U64_LO(fp->tx_desc_mapping + | |
610 | BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); | |
611 | } | |
612 | ||
613 | fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; | |
614 | fp->tx_db.data.zero_fill1 = 0; | |
615 | fp->tx_db.data.prod = 0; | |
616 | ||
617 | fp->tx_pkt_prod = 0; | |
618 | fp->tx_pkt_cons = 0; | |
619 | fp->tx_bd_prod = 0; | |
620 | fp->tx_bd_cons = 0; | |
621 | fp->tx_cons_sb = BNX2X_TX_SB_INDEX; | |
622 | fp->tx_pkt = 0; | |
623 | } | |
624 | } | |
625 | static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) | |
626 | { | |
627 | u16 rx_cons_sb; | |
628 | ||
629 | /* Tell compiler that status block fields can change */ | |
630 | barrier(); | |
631 | rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); | |
632 | if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) | |
633 | rx_cons_sb++; | |
634 | return (fp->rx_comp_cons != rx_cons_sb); | |
635 | } | |
636 | ||
637 | /* HW Lock for shared dual port PHYs */ | |
638 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | |
639 | void bnx2x_release_phy_lock(struct bnx2x *bp); | |
640 | ||
641 | void bnx2x_link_report(struct bnx2x *bp); | |
642 | int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); | |
643 | int bnx2x_tx_int(struct bnx2x_fastpath *fp); | |
644 | void bnx2x_init_rx_rings(struct bnx2x *bp); | |
645 | netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
646 | ||
647 | int bnx2x_change_mac_addr(struct net_device *dev, void *p); | |
648 | void bnx2x_tx_timeout(struct net_device *dev); | |
649 | void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp); | |
650 | void bnx2x_netif_start(struct bnx2x *bp); | |
651 | void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); | |
652 | void bnx2x_free_irq(struct bnx2x *bp, bool disable_only); | |
653 | int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); | |
654 | int bnx2x_resume(struct pci_dev *pdev); | |
655 | void bnx2x_free_skbs(struct bnx2x *bp); | |
656 | int bnx2x_change_mtu(struct net_device *dev, int new_mtu); | |
657 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | |
658 | int bnx2x_nic_load(struct bnx2x *bp, int load_mode); | |
659 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | |
660 | ||
661 | #endif /* BNX2X_CMN_H */ |