/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, write to the Free Software Foundation, Inc.,   *
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

#ifdef NETIF_F_TSO
#include <linux/tcp.h>
#endif

#define SGE_CMDQ_N              2
#define SGE_FREELQ_N            2
#define SGE_CMDQ0_E_N           1024
#define SGE_CMDQ1_E_N           128
#define SGE_FREEL_SIZE          4096
#define SGE_JUMBO_FREEL_SIZE    512
#define SGE_FREEL_REFILL_THRESH 16
#define SGE_RESPQ_E_N           1024
#define SGE_INTRTIMER_NRES      1000
#define SGE_RX_COPY_THRES       256
#define SGE_RX_SM_BUF_SIZE      1536

# define SGE_RX_DROP_THRES 2

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#ifndef NET_IP_ALIGN
# define NET_IP_ALIGN 2
#endif

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 flags;
        u32 addr_hi;
};

struct freelQ_e {
        u32 addr_lo;
        u32 len_gen;
        u32 gen2;
        u32 addr_hi;
};

struct respQ_e {
        u32 Qsleeping           : 4;
        u32 Cmdq1CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq0DmaComplete    : 5;
        u32 FreelistQid         : 2;
        u32 CreditValid         : 1;
        u32 DataValid           : 1;
        u32 Offload             : 1;
        u32 Eop                 : 1;
        u32 Sop                 : 1;
        u32 GenerationBit       : 1;
        u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 flags;
};

struct freelQ_e {
        u32 len_gen;
        u32 addr_lo;
        u32 addr_hi;
        u32 gen2;
};

struct respQ_e {
        u32 BufferLength;
        u32 GenerationBit       : 1;
        u32 Sop                 : 1;
        u32 Eop                 : 1;
        u32 Offload             : 1;
        u32 DataValid           : 1;
        u32 CreditValid         : 1;
        u32 FreelistQid         : 2;
        u32 Cmdq0DmaComplete    : 5;
        u32 Cmdq0CreditReturn   : 5;
        u32 Cmdq1DmaComplete    : 5;
        u32 Cmdq1CreditReturn   : 5;
        u32 Qsleeping           : 4;
};
#endif

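/*
 * A note on the generation bits: none of the rings carries an explicit
 * "valid" flag.  Instead each descriptor is stamped with the generation
 * bit that was current when it was written, and the reader compares it
 * against the generation it expects (the genbit field of the SW queue
 * state below).  Whenever an index wraps past the end of a ring the
 * writer's generation bit is flipped, which is why the refill, recycle,
 * and response-processing code in this file all toggle genbit on wrap.
 */
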
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
        DECLARE_PCI_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(dma_addr);
        DECLARE_PCI_UNMAP_LEN(dma_len);
};

/*
 * SW command, freelist and response rings
 */
struct cmdQ {
        unsigned long   status;         /* HW DMA fetch status */
        unsigned int    in_use;         /* # of in-use command descriptors */
        unsigned int    size;           /* # of descriptors */
        unsigned int    processed;      /* total # of descs HW has processed */
        unsigned int    cleaned;        /* total # of descs SW has reclaimed */
        unsigned int    stop_thres;     /* SW TX queue suspend threshold */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u8              genbit;         /* current generation (=valid) bit */
        u8              sop;            /* is next entry start of packet? */
        struct cmdQ_e  *entries;        /* HW command descriptor Q */
        struct cmdQ_ce *centries;       /* SW command context descriptor Q */
        spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
        dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
};

struct freelQ {
        unsigned int    credits;        /* # of available RX buffers */
        unsigned int    size;           /* free list capacity */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u16             rx_buffer_size; /* Buffer size on this free list */
        u16             dma_offset;     /* DMA offset to align IP headers */
        u16             recycleq_idx;   /* skb recycle q to use */
        u8              genbit;         /* current generation (=valid) bit */
        struct freelQ_e  *entries;      /* HW freelist descriptor Q */
        struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW freelist descriptor Q */
};

struct respQ {
        unsigned int    credits;        /* credits to be returned to SGE */
        unsigned int    size;           /* # of response Q descriptors */
        u16             cidx;           /* consumer index (SW) */
        u8              genbit;         /* current generation(=valid) bit */
        struct respQ_e *entries;        /* HW response descriptor Q */
        dma_addr_t      dma_addr;       /* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
        CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
        CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
};

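/*
 * Index ownership at a glance: for the command and free-list rings SW is
 * the producer (advances pidx) and the SGE consumes (cidx), while for the
 * response ring the SGE produces and SW consumes via cidx alone.  Response
 * entries that SW has processed are handed back to the hardware in batches
 * through the A_SG_RSPQUEUECREDIT register.
 */
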
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on an MP
 * system the application is migrated to another CPU.  In that scenario,
 * we try to separate the RX (in irq context) and TX state in order to
 * decrease memory contention.
 */
struct sge {
        struct adapter *adapter;        /* adapter backpointer */
        struct net_device *netdev;      /* netdevice backpointer */
        struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
        struct respQ respQ;             /* response Q */
        unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
        unsigned int rx_pkt_pad;        /* RX padding for L2 packets */
        unsigned int jumbo_fl;          /* jumbo freelist Q index */
        unsigned int intrtimer_nres;    /* no-resource interrupt timer */
        unsigned int fixed_intrtimer;   /* non-adaptive interrupt timer */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list espibug_timer;
        unsigned int espibug_timeout;
        struct sk_buff *espibug_skb;
        u32 sge_control;                /* shadow value of sge control reg */
        struct sge_intr_counts stats;
        struct sge_port_stats port_stats[MAX_NPORTS];
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
        wmb();
        writel(val, adapter->regs + A_SG_DOORBELL);
}

/*
 * Frees all RX buffers on the freelist Q.  The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
        unsigned int cidx = q->cidx;

        while (q->credits--) {
                struct freelQ_ce *ce = &q->centries[cidx];

                pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                                 pci_unmap_len(ce, dma_len),
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb(ce->skb);
                ce->skb = NULL;
                if (++cidx == q->size)
                        cidx = 0;
        }
}

/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        if (sge->respQ.entries) {
                size = sizeof(struct respQ_e) * sge->respQ.size;
                pci_free_consistent(pdev, size, sge->respQ.entries,
                                    sge->respQ.dma_addr);
        }

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                if (q->centries) {
                        free_freelQ_buffers(pdev, q);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct freelQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_FREELQ_N; i++) {
                struct freelQ *q = &sge->freelQ[i];

                q->genbit = 1;
                q->size = p->freelQ_size[i];
                q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
                size = sizeof(struct freelQ_e) * q->size;
                q->entries = (struct freelQ_e *)
                              pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;
                memset(q->entries, 0, size);
                size = sizeof(struct freelQ_ce) * q->size;
                q->centries = kmalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
                memset(q->centries, 0, size);
        }

        /*
         * Calculate the buffer sizes for the two free lists.  FL0 accommodates
         * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
         * including all the sk_buff overhead.
         *
         * Note: For T2 FL0 and FL1 are reversed.
         */
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;
        sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /*
         * Setup which skb recycle Q should be used when recycling buffers from
         * each free list.
         */
        sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
        sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

        sge->respQ.genbit = 1;
        sge->respQ.size = SGE_RESPQ_E_N;
        sge->respQ.credits = 0;
        size = sizeof(struct respQ_e) * sge->respQ.size;
        sge->respQ.entries = (struct respQ_e *)
                pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
        if (!sge->respQ.entries)
                goto err_no_mem;
        memset(sge->respQ.entries, 0, size);
        return 0;

err_no_mem:
        free_rx_resources(sge);
        return -ENOMEM;
}

/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
        struct cmdQ_ce *ce;
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int cidx = q->cidx;

        q->in_use -= n;
        ce = &q->centries[cidx];
        while (n--) {
                if (q->sop)
                        pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                                         pci_unmap_len(ce, dma_len),
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
                                       pci_unmap_len(ce, dma_len),
                                       PCI_DMA_TODEVICE);
                q->sop = 0;
                if (ce->skb) {
                        dev_kfree_skb(ce->skb);
                        q->sop = 1;
                }
                ce++;
                if (++cidx == q->size) {
                        cidx = 0;
                        ce = q->centries;
                }
        }
        q->cidx = cidx;
}

/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                if (q->centries) {
                        if (q->in_use)
                                free_cmdQ_buffers(sge, q, q->in_use);
                        kfree(q->centries);
                }
                if (q->entries) {
                        size = sizeof(struct cmdQ_e) * q->size;
                        pci_free_consistent(pdev, size, q->entries,
                                            q->dma_addr);
                }
        }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        unsigned int size, i;

        for (i = 0; i < SGE_CMDQ_N; i++) {
                struct cmdQ *q = &sge->cmdQ[i];

                q->genbit = 1;
                q->sop = 1;
                q->size = p->cmdQ_size[i];
                q->in_use = 0;
                q->status = 0;
                q->processed = q->cleaned = 0;
                q->stop_thres = 0;
                spin_lock_init(&q->lock);
                size = sizeof(struct cmdQ_e) * q->size;
                q->entries = (struct cmdQ_e *)
                              pci_alloc_consistent(pdev, size, &q->dma_addr);
                if (!q->entries)
                        goto err_no_mem;
                memset(q->entries, 0, size);
                size = sizeof(struct cmdQ_ce) * q->size;
                q->centries = kmalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
                memset(q->centries, 0, size);
        }

        /*
         * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
         * only.  For queue 0 set the stop threshold so we can handle one more
         * packet from each port, plus reserve an additional 24 entries for
         * Ethernet packets only.  Queue 1 never suspends nor do we reserve
         * space for Ethernet packets.
         */
        sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
                (MAX_SKB_FRAGS + 1);
        return 0;

err_no_mem:
        free_tx_resources(sge);
        return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
        writel((u32)addr, adapter->regs + base_reg_lo);
        writel(addr >> 32, adapter->regs + base_reg_hi);
        writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
        struct sge *sge = adapter->sge;

        sge->sge_control &= ~F_VLAN_XTRACT;
        if (on_off)
                sge->sge_control |= F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
                readl(adapter->regs + A_SG_CONTROL); /* flush */
        }
}

/*
 * Programs the various SGE registers.  The engine is not yet enabled, but
 * sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
        struct adapter *ap = sge->adapter;

        writel(0, ap->regs + A_SG_CONTROL);
        setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                          A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
        setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                          A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
        setup_ring_params(ap, sge->freelQ[0].dma_addr,
                          sge->freelQ[0].size, A_SG_FL0BASELWR,
                          A_SG_FL0BASEUPR, A_SG_FL0SIZE);
        setup_ring_params(ap, sge->freelQ[1].dma_addr,
                          sge->freelQ[1].size, A_SG_FL1BASELWR,
                          A_SG_FL1BASEUPR, A_SG_FL1SIZE);

        /* The threshold comparison uses <. */
        writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

        setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
                          A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
        writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
                F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
        sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

        /* Initialize no-resource timer */
        sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

        t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
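/*
 * Worked example: alloc_rx_resources() sizes each jumbo buffer as
 * (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); the usable
 * payload is that figure less the free list's dma_offset and the
 * sizeof(struct cpl_rx_data) header that precedes each ingress packet.
 */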
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
        return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
                sge->freelQ[sge->jumbo_fl].dma_offset -
                sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
        if (sge->espibug_skb)
                kfree_skb(sge->espibug_skb);

        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done.  This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify an RX offset in order to make sure that the IP header is 4B
 * aligned.
 */
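/*
 * Note the two-step descriptor update in the loop below: every field,
 * including len_gen with the first copy of the generation bit, is written
 * before the wmb(); gen2, the second copy, is written after it.  Since the
 * generation bits serve as the valid indication, this ordering keeps the
 * SGE from ever consuming a partially written descriptor.
 */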
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
        struct pci_dev *pdev = sge->adapter->pdev;
        struct freelQ_ce *ce = &q->centries[q->pidx];
        struct freelQ_e *e = &q->entries[q->pidx];
        unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

        while (q->credits < q->size) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
                if (!skb)
                        break;

                skb_reserve(skb, q->dma_offset);
                mapping = pci_map_single(pdev, skb->data, dma_len,
                                         PCI_DMA_FROMDEVICE);
                ce->skb = skb;
                pci_unmap_addr_set(ce, dma_addr, mapping);
                pci_unmap_len_set(ce, dma_len, dma_len);
                e->addr_lo = (u32)mapping;
                e->addr_hi = (u64)mapping >> 32;
                e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
                wmb();
                e->gen2 = V_CMD_GEN2(q->genbit);

                e++;
                ce++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        q->genbit ^= 1;
                        ce = q->centries;
                        e = q->entries;
                }
                q->credits++;
        }
}

/*
 * Calls refill_free_list for both free lists.  If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
        u32 irqholdoff_reg;

        refill_free_list(sge, &sge->freelQ[0]);
        refill_free_list(sge, &sge->freelQ[1]);

        if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
            sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
                irq_reg |= F_FL_EXHAUSTED;
                irqholdoff_reg = sge->fixed_intrtimer;
        } else {
                /* Clear the F_FL_EXHAUSTED interrupts for now */
                irq_reg &= ~F_FL_EXHAUSTED;
                irqholdoff_reg = sge->intrtimer_nres;
        }
        writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
        writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

        /* We reenable the Qs to force a freelist GTS interrupt later */
        doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
        writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
        u32 en = SGE_INT_ENABLE;
        u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

        if (sge->adapter->flags & TSO_CAPABLE)
                en &= ~F_PACKET_TOO_BIG;
        writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
        writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
        writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
        writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
        struct adapter *adapter = sge->adapter;
        u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

        if (adapter->flags & TSO_CAPABLE)
                cause &= ~F_PACKET_TOO_BIG;
        if (cause & F_RESPQ_EXHAUSTED)
                sge->stats.respQ_empty++;
        if (cause & F_RESPQ_OVERFLOW) {
                sge->stats.respQ_overflow++;
                CH_ALERT("%s: SGE response queue overflow\n",
                         adapter->name);
        }
        if (cause & F_FL_EXHAUSTED) {
                sge->stats.freelistQ_empty++;
                freelQs_empty(sge);
        }
        if (cause & F_PACKET_TOO_BIG) {
                sge->stats.pkt_too_big++;
                CH_ALERT("%s: SGE max packet size exceeded\n",
                         adapter->name);
        }
        if (cause & F_PACKET_MISMATCH) {
                sge->stats.pkt_mismatch++;
                CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
        }
        if (cause & SGE_INT_FATAL)
                t1_fatal_err(adapter);

        writel(cause, adapter->regs + A_SG_INT_CAUSE);
        return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
{
        return &sge->stats;
}

const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
{
        return &sge->port_stats[port];
}

/**
 *      recycle_fl_buf - recycle a free list buffer
 *      @fl: the free list
 *      @idx: index of buffer to recycle
 *
 *      Recycles the specified buffer on the given free list by adding it at
 *      the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
        struct freelQ_e *from = &fl->entries[idx];
        struct freelQ_e *to = &fl->entries[fl->pidx];

        fl->centries[fl->pidx] = fl->centries[idx];
        to->addr_lo = from->addr_lo;
        to->addr_hi = from->addr_hi;
        to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
        wmb();
        to->gen2 = V_CMD_GEN2(fl->genbit);
        fl->credits++;

        if (++fl->pidx == fl->size) {
                fl->pidx = 0;
                fl->genbit ^= 1;
        }
}

/**
 *      get_packet - return the next ingress packet buffer
 *      @pdev: the PCI device that received the packet
 *      @fl: the SGE free list holding the packet
 *      @len: the actual packet length, excluding any SGE padding
 *      @dma_pad: padding at beginning of buffer left by SGE DMA
 *      @skb_pad: padding to be used if the packet is copied
 *      @copy_thres: length threshold under which a packet should be copied
 *      @drop_thres: # of remaining buffers before we start dropping packets
 *
 *      Get the next packet from a free list and complete setup of the
 *      sk_buff.  If the packet is small we make a copy and recycle the
 *      original buffer, otherwise we use the original buffer itself.  If a
 *      positive drop threshold is supplied packets are dropped and their
 *      buffers recycled if (a) the number of remaining buffers is under the
 *      threshold and the packet is too big to copy, or (b) the packet should
 *      be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                         struct freelQ *fl, unsigned int len,
                                         int dma_pad, int skb_pad,
                                         unsigned int copy_thres,
                                         unsigned int drop_thres)
{
        struct sk_buff *skb;
        struct freelQ_ce *ce = &fl->centries[fl->cidx];

        if (len < copy_thres) {
                skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
                if (likely(skb != NULL)) {
                        skb_reserve(skb, skb_pad);
                        skb_put(skb, len);
                        pci_dma_sync_single_for_cpu(pdev,
                                            pci_unmap_addr(ce, dma_addr),
                                            pci_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                        memcpy(skb->data, ce->skb->data + dma_pad, len);
                        pci_dma_sync_single_for_device(pdev,
                                            pci_unmap_addr(ce, dma_addr),
                                            pci_unmap_len(ce, dma_len),
                                            PCI_DMA_FROMDEVICE);
                } else if (!drop_thres)
                        goto use_orig_buf;

                recycle_fl_buf(fl, fl->cidx);
                return skb;
        }

        if (fl->credits < drop_thres) {
                recycle_fl_buf(fl, fl->cidx);
                return NULL;
        }

use_orig_buf:
        pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                         pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        skb = ce->skb;
        skb_reserve(skb, dma_pad);
        skb_put(skb, len);
        return skb;
}

/**
 *      unexpected_offload - handle an unexpected offload packet
 *      @adapter: the adapter
 *      @fl: the free list that received the packet
 *
 *      Called when we receive an unexpected offload packet (e.g., the TOE
 *      function is disabled or the card is a NIC).  Prints a message and
 *      recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
        struct freelQ_ce *ce = &fl->centries[fl->cidx];
        struct sk_buff *skb = ce->skb;

        pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
                            pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
        CH_ERR("%s: unexpected offload packet, cmd %u\n",
               adapter->name, *skb->data);
        recycle_fl_buf(fl, fl->cidx);
}

/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
559fb51b SB |
863 | static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, |
864 | unsigned int pidx, unsigned int gen, | |
865 | struct cmdQ *q) | |
8199d3a7 | 866 | { |
559fb51b SB |
867 | dma_addr_t mapping; |
868 | struct cmdQ_e *e, *e1; | |
869 | struct cmdQ_ce *ce; | |
870 | unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags; | |
871 | ||
872 | mapping = pci_map_single(adapter->pdev, skb->data, | |
873 | skb->len - skb->data_len, PCI_DMA_TODEVICE); | |
874 | ce = &q->centries[pidx]; | |
875 | ce->skb = NULL; | |
876 | pci_unmap_addr_set(ce, dma_addr, mapping); | |
877 | pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); | |
8199d3a7 | 878 | |
559fb51b SB |
879 | flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) | |
880 | V_CMD_GEN2(gen); | |
881 | e = &q->entries[pidx]; | |
882 | e->addr_lo = (u32)mapping; | |
883 | e->addr_hi = (u64)mapping >> 32; | |
884 | e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen); | |
885 | for (e1 = e, i = 0; nfrags--; i++) { | |
886 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
8199d3a7 | 887 | |
559fb51b SB |
888 | ce++; |
889 | e1++; | |
890 | if (++pidx == q->size) { | |
891 | pidx = 0; | |
892 | gen ^= 1; | |
893 | ce = q->centries; | |
894 | e1 = q->entries; | |
8199d3a7 | 895 | } |
8199d3a7 | 896 | |
559fb51b SB |
897 | mapping = pci_map_page(adapter->pdev, frag->page, |
898 | frag->page_offset, frag->size, | |
899 | PCI_DMA_TODEVICE); | |
900 | ce->skb = NULL; | |
901 | pci_unmap_addr_set(ce, dma_addr, mapping); | |
902 | pci_unmap_len_set(ce, dma_len, frag->size); | |
903 | ||
904 | e1->addr_lo = (u32)mapping; | |
905 | e1->addr_hi = (u64)mapping >> 32; | |
906 | e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen); | |
907 | e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) | | |
908 | V_CMD_GEN2(gen); | |
8199d3a7 CL |
909 | } |
910 | ||
559fb51b SB |
911 | ce->skb = skb; |
912 | wmb(); | |
913 | e->flags = flags; | |
914 | } | |
8199d3a7 | 915 | |
559fb51b SB |
916 | /* |
917 | * Clean up completed Tx buffers. | |
918 | */ | |
919 | static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) | |
920 | { | |
921 | unsigned int reclaim = q->processed - q->cleaned; | |
8199d3a7 | 922 | |
559fb51b SB |
923 | if (reclaim) { |
924 | free_cmdQ_buffers(sge, q, reclaim); | |
925 | q->cleaned += reclaim; | |
8199d3a7 | 926 | } |
559fb51b | 927 | } |
8199d3a7 | 928 | |
559fb51b SB |
929 | #ifndef SET_ETHTOOL_OPS |
930 | # define __netif_rx_complete(dev) netif_rx_complete(dev) | |
931 | #endif | |
8199d3a7 | 932 | |
559fb51b SB |
933 | /* |
934 | * We cannot use the standard netif_rx_schedule_prep() because we have multiple | |
935 | * ports plus the TOE all multiplexing onto a single response queue, therefore | |
936 | * accepting new responses cannot depend on the state of any particular port. | |
937 | * So define our own equivalent that omits the netif_running() test. | |
938 | */ | |
939 | static inline int napi_schedule_prep(struct net_device *dev) | |
940 | { | |
941 | return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); | |
8199d3a7 CL |
942 | } |
943 | ||
944 | ||
559fb51b SB |
945 | /** |
946 | * sge_rx - process an ingress ethernet packet | |
947 | * @sge: the sge structure | |
948 | * @fl: the free list that contains the packet buffer | |
949 | * @len: the packet length | |
8199d3a7 | 950 | * |
559fb51b | 951 | * Process an ingress ethernet pakcet and deliver it to the stack. |
8199d3a7 | 952 | */ |
559fb51b | 953 | static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) |
8199d3a7 | 954 | { |
559fb51b SB |
955 | struct sk_buff *skb; |
956 | struct cpl_rx_pkt *p; | |
957 | struct adapter *adapter = sge->adapter; | |
8199d3a7 | 958 | |
559fb51b SB |
959 | sge->stats.ethernet_pkts++; |
960 | skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, | |
961 | sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES, | |
962 | SGE_RX_DROP_THRES); | |
963 | if (!skb) { | |
964 | sge->port_stats[0].rx_drops++; /* charge only port 0 for now */ | |
965 | return 0; | |
8199d3a7 | 966 | } |
559fb51b SB |
967 | |
968 | p = (struct cpl_rx_pkt *)skb->data; | |
969 | skb_pull(skb, sizeof(*p)); | |
970 | skb->dev = adapter->port[p->iff].dev; | |
971 | skb->dev->last_rx = jiffies; | |
972 | skb->protocol = eth_type_trans(skb, skb->dev); | |
973 | if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && | |
974 | skb->protocol == htons(ETH_P_IP) && | |
975 | (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { | |
976 | sge->port_stats[p->iff].rx_cso_good++; | |
977 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
978 | } else | |
979 | skb->ip_summed = CHECKSUM_NONE; | |
980 | ||
981 | if (unlikely(adapter->vlan_grp && p->vlan_valid)) { | |
982 | sge->port_stats[p->iff].vlan_xtract++; | |
983 | if (adapter->params.sge.polling) | |
984 | vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, | |
985 | ntohs(p->vlan)); | |
986 | else | |
987 | vlan_hwaccel_rx(skb, adapter->vlan_grp, | |
988 | ntohs(p->vlan)); | |
989 | } else if (adapter->params.sge.polling) | |
990 | netif_receive_skb(skb); | |
991 | else | |
992 | netif_rx(skb); | |
993 | return 0; | |
8199d3a7 CL |
994 | } |
995 | ||
996 | /* | |
559fb51b | 997 | * Returns true if a command queue has enough available descriptors that |
8199d3a7 CL |
998 | * we can resume Tx operation after temporarily disabling its packet queue. |
999 | */ | |
559fb51b | 1000 | static inline int enough_free_Tx_descs(const struct cmdQ *q) |
8199d3a7 | 1001 | { |
559fb51b SB |
1002 | unsigned int r = q->processed - q->cleaned; |
1003 | ||
1004 | return q->in_use - r < (q->size >> 1); | |
8199d3a7 CL |
1005 | } |
1006 | ||
1007 | /* | |
559fb51b SB |
1008 | * Called when sufficient space has become available in the SGE command queues |
1009 | * after the Tx packet schedulers have been suspended to restart the Tx path. | |
8199d3a7 | 1010 | */ |
559fb51b | 1011 | static void restart_tx_queues(struct sge *sge) |
8199d3a7 | 1012 | { |
559fb51b | 1013 | struct adapter *adap = sge->adapter; |
8199d3a7 | 1014 | |
559fb51b SB |
1015 | if (enough_free_Tx_descs(&sge->cmdQ[0])) { |
1016 | int i; | |
1017 | ||
1018 | for_each_port(adap, i) { | |
1019 | struct net_device *nd = adap->port[i].dev; | |
1020 | ||
1021 | if (test_and_clear_bit(nd->if_port, | |
1022 | &sge->stopped_tx_queues) && | |
1023 | netif_running(nd)) { | |
1024 | sge->stats.cmdQ_restarted[3]++; | |
1025 | netif_wake_queue(nd); | |
1026 | } | |
1027 | } | |
1028 | } | |
1029 | } | |
1030 | ||
1031 | /* | |
1032 | * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 | |
1033 | * information. | |
1034 | */ | |
1035 | static unsigned int update_tx_info(struct adapter *adapter, | |
1036 | unsigned int flags, | |
1037 | unsigned int pr0) | |
1038 | { | |
1039 | struct sge *sge = adapter->sge; | |
1040 | struct cmdQ *cmdq = &sge->cmdQ[0]; | |
8199d3a7 | 1041 | |
559fb51b | 1042 | cmdq->processed += pr0; |
8199d3a7 | 1043 | |
559fb51b SB |
1044 | if (flags & F_CMDQ0_ENABLE) { |
1045 | clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); | |
1046 | ||
1047 | if (cmdq->cleaned + cmdq->in_use != cmdq->processed && | |
1048 | !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { | |
1049 | set_bit(CMDQ_STAT_RUNNING, &cmdq->status); | |
1050 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | |
1051 | } | |
1052 | flags &= ~F_CMDQ0_ENABLE; | |
1053 | } | |
1054 | ||
1055 | if (unlikely(sge->stopped_tx_queues != 0)) | |
1056 | restart_tx_queues(sge); | |
8199d3a7 | 1057 | |
559fb51b SB |
1058 | return flags; |
1059 | } | |
8199d3a7 | 1060 | |
559fb51b SB |
1061 | /* |
1062 | * Process SGE responses, up to the supplied budget. Returns the number of | |
1063 | * responses processed. A negative budget is effectively unlimited. | |
1064 | */ | |
1065 | static int process_responses(struct adapter *adapter, int budget) | |
1066 | { | |
1067 | struct sge *sge = adapter->sge; | |
1068 | struct respQ *q = &sge->respQ; | |
1069 | struct respQ_e *e = &q->entries[q->cidx]; | |
1070 | int budget_left = budget; | |
1071 | unsigned int flags = 0; | |
1072 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | |
1073 | ||
1074 | ||
1075 | while (likely(budget_left && e->GenerationBit == q->genbit)) { | |
1076 | flags |= e->Qsleeping; | |
1077 | ||
1078 | cmdq_processed[0] += e->Cmdq0CreditReturn; | |
1079 | cmdq_processed[1] += e->Cmdq1CreditReturn; | |
1080 | ||
1081 | /* We batch updates to the TX side to avoid cacheline | |
1082 | * ping-pong of TX state information on MP where the sender | |
1083 | * might run on a different CPU than this function... | |
1084 | */ | |
1085 | if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { | |
1086 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); | |
1087 | cmdq_processed[0] = 0; | |
1088 | } | |
1089 | if (unlikely(cmdq_processed[1] > 16)) { | |
1090 | sge->cmdQ[1].processed += cmdq_processed[1]; | |
1091 | cmdq_processed[1] = 0; | |
8199d3a7 CL |
1092 | } |
1093 | if (likely(e->DataValid)) { | |
559fb51b SB |
1094 | struct freelQ *fl = &sge->freelQ[e->FreelistQid]; |
1095 | ||
1096 | if (unlikely(!e->Sop || !e->Eop)) | |
8199d3a7 | 1097 | BUG(); |
559fb51b SB |
1098 | if (unlikely(e->Offload)) |
1099 | unexpected_offload(adapter, fl); | |
1100 | else | |
1101 | sge_rx(sge, fl, e->BufferLength); | |
1102 | ||
1103 | /* | |
1104 | * Note: this depends on each packet consuming a | |
1105 | * single free-list buffer; cf. the BUG above. | |
1106 | */ | |
1107 | if (++fl->cidx == fl->size) | |
1108 | fl->cidx = 0; | |
1109 | if (unlikely(--fl->credits < | |
1110 | fl->size - SGE_FREEL_REFILL_THRESH)) | |
1111 | refill_free_list(sge, fl); | |
1112 | } else | |
1113 | sge->stats.pure_rsps++; | |
8199d3a7 | 1114 | |
8199d3a7 | 1115 | e++; |
559fb51b SB |
1116 | if (unlikely(++q->cidx == q->size)) { |
1117 | q->cidx = 0; | |
1118 | q->genbit ^= 1; | |
1119 | e = q->entries; | |
1120 | } | |
1121 | prefetch(e); | |
1122 | ||
1123 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { | |
1124 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | |
1125 | q->credits = 0; | |
8199d3a7 | 1126 | } |
559fb51b | 1127 | --budget_left; |
8199d3a7 CL |
1128 | } |
1129 | ||
559fb51b SB |
1130 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1131 | sge->cmdQ[1].processed += cmdq_processed[1]; | |
8199d3a7 | 1132 | |
559fb51b SB |
1133 | budget -= budget_left; |
1134 | return budget; | |
1135 | } | |
8199d3a7 | 1136 | |
559fb51b SB |
1137 | /* |
1138 | * A simpler version of process_responses() that handles only pure (i.e., | |
1139 | * non data-carrying) responses. Such respones are too light-weight to justify | |
1140 | * calling a softirq when using NAPI, so we handle them specially in hard | |
1141 | * interrupt context. The function is called with a pointer to a response, | |
1142 | * which the caller must ensure is a valid pure response. Returns 1 if it | |
1143 | * encounters a valid data-carrying response, 0 otherwise. | |
1144 | */ | |
1145 | static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) | |
1146 | { | |
1147 | struct sge *sge = adapter->sge; | |
1148 | struct respQ *q = &sge->respQ; | |
1149 | unsigned int flags = 0; | |
1150 | unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; | |
8199d3a7 | 1151 | |
559fb51b SB |
1152 | do { |
1153 | flags |= e->Qsleeping; | |
8199d3a7 | 1154 | |
559fb51b SB |
1155 | cmdq_processed[0] += e->Cmdq0CreditReturn; |
1156 | cmdq_processed[1] += e->Cmdq1CreditReturn; | |
1157 | ||
1158 | e++; | |
1159 | if (unlikely(++q->cidx == q->size)) { | |
1160 | q->cidx = 0; | |
1161 | q->genbit ^= 1; | |
1162 | e = q->entries; | |
8199d3a7 | 1163 | } |
559fb51b | 1164 | prefetch(e); |
8199d3a7 | 1165 | |
559fb51b SB |
1166 | if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { |
1167 | writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); | |
1168 | q->credits = 0; | |
8199d3a7 | 1169 | } |
559fb51b SB |
1170 | sge->stats.pure_rsps++; |
1171 | } while (e->GenerationBit == q->genbit && !e->DataValid); | |
8199d3a7 | 1172 | |
559fb51b SB |
1173 | flags = update_tx_info(adapter, flags, cmdq_processed[0]); |
1174 | sge->cmdQ[1].processed += cmdq_processed[1]; | |
8199d3a7 | 1175 | |
559fb51b | 1176 | return e->GenerationBit == q->genbit; |
8199d3a7 CL |
1177 | } |
1178 | ||
1179 | /* | |
559fb51b SB |
1180 | * Handler for new data events when using NAPI. This does not need any locking |
1181 | * or protection from interrupts as data interrupts are off at this point and | |
1182 | * other adapter interrupts do not interfere. | |
8199d3a7 | 1183 | */ |
559fb51b | 1184 | static int t1_poll(struct net_device *dev, int *budget) |
8199d3a7 | 1185 | { |
559fb51b SB |
1186 | struct adapter *adapter = dev->priv; |
1187 | int effective_budget = min(*budget, dev->quota); | |
1188 | ||
1189 | int work_done = process_responses(adapter, effective_budget); | |
1190 | *budget -= work_done; | |
1191 | dev->quota -= work_done; | |
8199d3a7 | 1192 | |
559fb51b SB |
1193 | if (work_done >= effective_budget) |
1194 | return 1; | |
1195 | ||
1196 | __netif_rx_complete(dev); | |
8199d3a7 CL |
1197 | |
1198 | /* | |
559fb51b SB |
1199 | * Because we don't atomically flush the following write it is |
1200 | * possible that in very rare cases it can reach the device in a way | |
1201 | * that races with a new response being written plus an error interrupt | |
1202 | * causing the NAPI interrupt handler below to return unhandled status | |
1203 | * to the OS. To protect against this would require flushing the write | |
1204 | * and doing both the write and the flush with interrupts off. Way too | |
1205 | * expensive and unjustifiable given the rarity of the race. | |
8199d3a7 | 1206 | */ |
559fb51b SB |
1207 | writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); |
1208 | return 0; | |
1209 | } | |
8199d3a7 | 1210 | |
559fb51b SB |
1211 | /* |
1212 | * Returns true if the device is already scheduled for polling. | |
1213 | */ | |
1214 | static inline int napi_is_scheduled(struct net_device *dev) | |
1215 | { | |
1216 | return test_bit(__LINK_STATE_RX_SCHED, &dev->state); | |
1217 | } | |
8199d3a7 | 1218 | |
559fb51b SB |
1219 | /* |
1220 | * NAPI version of the main interrupt handler. | |
1221 | */ | |
1222 | static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs) | |
1223 | { | |
1224 | int handled; | |
1225 | struct adapter *adapter = data; | |
1226 | struct sge *sge = adapter->sge; | |
1227 | struct respQ *q = &adapter->sge->respQ; | |
8199d3a7 | 1228 | |
559fb51b SB |
1229 | /* |
1230 | * Clear the SGE_DATA interrupt first thing. Normally the NAPI | |
1231 | * handler has control of the response queue and the interrupt handler | |
1232 | * can look at the queue reliably only once it knows NAPI is off. | |
1233 | * We can't wait that long to clear the SGE_DATA interrupt because we | |
1234 | * could race with t1_poll rearming the SGE interrupt, so we need to | |
1235 | * clear the interrupt speculatively and really early on. | |
1236 | */ | |
1237 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); | |
1238 | ||
1239 | spin_lock(&adapter->async_lock); | |
1240 | if (!napi_is_scheduled(sge->netdev)) { | |
1241 | struct respQ_e *e = &q->entries[q->cidx]; | |
1242 | ||
1243 | if (e->GenerationBit == q->genbit) { | |
1244 | if (e->DataValid || | |
1245 | process_pure_responses(adapter, e)) { | |
1246 | if (likely(napi_schedule_prep(sge->netdev))) | |
1247 | __netif_rx_schedule(sge->netdev); | |
1248 | else | |
1249 | printk(KERN_CRIT | |
1250 | "NAPI schedule failure!\n"); | |
1251 | } else | |
1252 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | |
1253 | handled = 1; | |
1254 | goto unlock; | |
1255 | } else | |
1256 | writel(q->cidx, adapter->regs + A_SG_SLEEPING); | |
1257 | } else | |
1258 | if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) | |
1259 | printk(KERN_ERR "data interrupt while NAPI running\n"); | |
1260 | ||
1261 | handled = t1_slow_intr_handler(adapter); | |
1262 | if (!handled) | |
1263 | sge->stats.unhandled_irqs++; | |
1264 | unlock: | |
1265 | spin_unlock(&adapter->async_lock); | |
1266 | return IRQ_RETVAL(handled != 0); | |
1267 | } | |
8199d3a7 | 1268 | |
559fb51b SB |
1269 | /* |
1270 | * Main interrupt handler, optimized assuming that we took a 'DATA' | |
1271 | * interrupt. | |
1272 | * | |
1273 | * 1. Clear the interrupt | |
1274 | * 2. Loop while we find valid descriptors and process them; accumulate | |
1275 | * information that can be processed after the loop | |
1276 | * 3. Tell the SGE at which index we stopped processing descriptors | |
1277 | * 4. Bookkeeping; free TX buffers, ring doorbell if there are any | |
1278 | * outstanding TX buffers waiting, replenish RX buffers, potentially | |
1279 | * reenable upper layers if they were turned off due to lack of TX | |
1280 | * resources which are available again. | |
1281 | * 5. If we took an interrupt, but no valid respQ descriptors was found we | |
1282 | * let the slow_intr_handler run and do error handling. | |
1283 | */ | |
1284 | static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs) | |
1285 | { | |
1286 | int work_done; | |
1287 | struct respQ_e *e; | |
1288 | struct adapter *adapter = cookie; | |
1289 | struct respQ *Q = &adapter->sge->respQ; | |
8199d3a7 | 1290 | |
559fb51b SB |
1291 | spin_lock(&adapter->async_lock); |
1292 | e = &Q->entries[Q->cidx]; | |
1293 | prefetch(e); | |
8199d3a7 | 1294 | |
559fb51b | 1295 | writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); |
8199d3a7 | 1296 | |
559fb51b SB |
1297 | if (likely(e->GenerationBit == Q->genbit)) |
1298 | work_done = process_responses(adapter, -1); | |
1299 | else | |
1300 | work_done = t1_slow_intr_handler(adapter); | |
8199d3a7 | 1301 | |
559fb51b SB |
1302 | /* |
1303 | * The unconditional clearing of the PL_CAUSE above may have raced | |
1304 | * with DMA completion and the corresponding generation of a response | |
1305 | * to cause us to miss the resulting data interrupt. The next write | |
1306 | * is also unconditional to recover the missed interrupt and render | |
1307 | * this race harmless. | |
1308 | */ | |
1309 | writel(Q->cidx, adapter->regs + A_SG_SLEEPING); | |
1310 | ||
1311 | if (!work_done) | |
1312 | adapter->sge->stats.unhandled_irqs++; | |
1313 | spin_unlock(&adapter->async_lock); | |
1314 | return IRQ_RETVAL(work_done != 0); | |
1315 | } | |
1316 | ||
1317 | intr_handler_t t1_select_intr_handler(adapter_t *adapter) | |
1318 | { | |
1319 | return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt; | |
1320 | } | |
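
/*
 * Illustrative registration of the selected handler (a sketch only; the
 * actual request_irq call lives elsewhere in the driver, and SA_SHIRQ is
 * assumed because the PCI interrupt line may be shared):
 *
 *      err = request_irq(adapter->pdev->irq,
 *                        t1_select_intr_handler(adapter), SA_SHIRQ,
 *                        adapter->name, adapter);
 */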
1321 | ||
1322 | /* | |
1323 | * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. | |
1324 | * | |
1325 | * The code figures out how many entries the sk_buff will require in the | |
1326 | * cmdQ and updates the cmdQ data structure with the state once the enqueue | |
1327 | * has complete. Then, it doesn't access the global structure anymore, but | |
1328 | * uses the corresponding fields on the stack. In conjuction with a spinlock | |
1329 | * around that code, we can make the function reentrant without holding the | |
1330 | * lock when we actually enqueue (which might be expensive, especially on | |
1331 | * architectures with IO MMUs). | |
1332 | * | |
1333 | * This runs with softirqs disabled. | |
1334 | */ | |
1335 | unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, | |
1336 | unsigned int qid, struct net_device *dev) | |
1337 | { | |
1338 | struct sge *sge = adapter->sge; | |
1339 | struct cmdQ *q = &sge->cmdQ[qid]; | |
1340 | unsigned int credits, pidx, genbit, count; | |
1341 | ||
1342 | spin_lock(&q->lock); | |
1343 | reclaim_completed_tx(sge, q); | |
1344 | ||
1345 | pidx = q->pidx; | |
1346 | credits = q->size - q->in_use; | |
1347 | count = 1 + skb_shinfo(skb)->nr_frags; | |
1348 | ||
1349 | { /* Ethernet packet */ | |
1350 | if (unlikely(credits < count)) { | |
1351 | netif_stop_queue(dev); | |
1352 | set_bit(dev->if_port, &sge->stopped_tx_queues); | |
1353 | sge->stats.cmdQ_full[3]++; | |
1354 | spin_unlock(&q->lock); | |
1355 | CH_ERR("%s: Tx ring full while queue awake!\n", | |
1356 | adapter->name); | |
1357 | return 1; | |
8199d3a7 | 1358 | } |
559fb51b SB |
1359 | if (unlikely(credits - count < q->stop_thres)) { |
1360 | sge->stats.cmdQ_full[3]++; | |
1361 | netif_stop_queue(dev); | |
1362 | set_bit(dev->if_port, &sge->stopped_tx_queues); | |
1363 | } | |
1364 | } | |
1365 | q->in_use += count; | |
1366 | genbit = q->genbit; | |
1367 | q->pidx += count; | |
1368 | if (q->pidx >= q->size) { | |
1369 | q->pidx -= q->size; | |
1370 | q->genbit ^= 1; | |
8199d3a7 | 1371 | } |
559fb51b | 1372 | spin_unlock(&q->lock); |
8199d3a7 | 1373 | |
559fb51b | 1374 | write_tx_descs(adapter, skb, pidx, genbit, q); |
8199d3a7 CL |
1375 | |
1376 | /* | |
1377 | * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring | |
1378 | * the doorbell if the Q is asleep. There is a natural race, where | |
1379 | * the hardware is going to sleep just after we checked, however, | |
1380 | * then the interrupt handler will detect the outstanding TX packet | |
1381 | * and ring the doorbell for us. | |
1382 | */ | |
559fb51b SB |
1383 | if (qid) |
1384 | doorbell_pio(adapter, F_CMDQ1_ENABLE); | |
1385 | else { | |
1386 | clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | |
1387 | if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { | |
1388 | set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); | |
1389 | writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); | |
1390 | } | |
8199d3a7 CL |
1391 | } |
1392 | return 0; | |
1393 | } | |
1394 | ||
1395 | #define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) | |
1396 | ||
559fb51b SB |
1397 | /* |
1398 | * eth_hdr_len - return the length of an Ethernet header | |
1399 | * @data: pointer to the start of the Ethernet header | |
1400 | * | |
1401 | * Returns the length of an Ethernet header, including optional VLAN tag. | |
1402 | */ | |
1403 | static inline int eth_hdr_len(const void *data) | |
1404 | { | |
1405 | const struct ethhdr *e = data; | |
1406 | ||
1407 | return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; | |
1408 | } | |
1409 | ||
8199d3a7 CL |
1410 | /* |
1411 | * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. | |
1412 | */ | |
1413 | int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1414 | { | |
1415 | struct adapter *adapter = dev->priv; | |
559fb51b SB |
1416 | struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port]; |
1417 | struct sge *sge = adapter->sge; | |
8199d3a7 | 1418 | struct cpl_tx_pkt *cpl; |
8199d3a7 | 1419 | |
559fb51b | 1420 | #ifdef NETIF_F_TSO |
8199d3a7 CL |
1421 | if (skb_shinfo(skb)->tso_size) { |
1422 | int eth_type; | |
1423 | struct cpl_tx_pkt_lso *hdr; | |
1424 | ||
559fb51b SB |
1425 | st->tso++; |
1426 | ||
8199d3a7 CL |
1427 | eth_type = skb->nh.raw - skb->data == ETH_HLEN ? |
1428 | CPL_ETH_II : CPL_ETH_II_VLAN; | |
1429 | ||
1430 | hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); | |
1431 | hdr->opcode = CPL_TX_PKT_LSO; | |
1432 | hdr->ip_csum_dis = hdr->l4_csum_dis = 0; | |
1433 | hdr->ip_hdr_words = skb->nh.iph->ihl; | |
1434 | hdr->tcp_hdr_words = skb->h.th->doff; | |
1435 | hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, | |
1436 | skb_shinfo(skb)->tso_size)); | |
1437 | hdr->len = htonl(skb->len - sizeof(*hdr)); | |
1438 | cpl = (struct cpl_tx_pkt *)hdr; | |
559fb51b | 1439 | sge->stats.tx_lso_pkts++; |
8199d3a7 | 1440 | } else |
559fb51b | 1441 | #endif |
8199d3a7 CL |
1442 | { |
1443 | /* | |
559fb51b SB |
1444 | * Packets shorter than ETH_HLEN can break the MAC, so drop them | |
1445 | * early. We may also get oversized packets because some parts of | |
1446 | * the kernel don't handle our unusual hard_header_len correctly; | |
1447 | * drop those too. | |
8199d3a7 | 1448 | */ |
559fb51b SB |
1449 | if (unlikely(skb->len < ETH_HLEN || |
1450 | skb->len > dev->mtu + eth_hdr_len(skb->data))) { | |
1451 | dev_kfree_skb_any(skb); | |
1452 | return NET_XMIT_SUCCESS; | |
1453 | } | |
1454 | ||
1455 | /* | |
1456 | * We are using a non-standard hard_header_len and some kernel | |
1457 | * components, such as pktgen, do not handle it right. | |
1458 | * Complain when this happens but try to fix things up. | |
1459 | */ | |
1460 | if (unlikely(skb_headroom(skb) < | |
1461 | dev->hard_header_len - ETH_HLEN)) { | |
1462 | struct sk_buff *orig_skb = skb; | |
1463 | ||
1464 | if (net_ratelimit()) | |
1465 | printk(KERN_ERR "%s: inadequate headroom in " | |
1466 | "Tx packet\n", dev->name); | |
1467 | skb = skb_realloc_headroom(skb, sizeof(*cpl)); | |
1468 | dev_kfree_skb_any(orig_skb); | |
1469 | if (!skb) | |
1470 | return -ENOMEM; | |
1471 | } | |
8199d3a7 CL |
1472 | |
1473 | if (!(adapter->flags & UDP_CSUM_CAPABLE) && | |
1474 | skb->ip_summed == CHECKSUM_HW && | |
559fb51b SB |
1475 | skb->nh.iph->protocol == IPPROTO_UDP) |
1476 | if (unlikely(skb_checksum_help(skb, 0))) { | |
1477 | dev_kfree_skb_any(skb); | |
1478 | return -ENOMEM; | |
1479 | } | |
8199d3a7 | 1480 | |
559fb51b SB |
1481 | /* Assume this catches the gratuitous ARP; hold on to it so we | |
1482 | * can use it later to flush out stuck ESPI packets. | |
1483 | */ | |
1484 | if (unlikely(!adapter->sge->espibug_skb)) { | |
8199d3a7 | 1485 | if (skb->protocol == htons(ETH_P_ARP) && |
559fb51b SB |
1486 | skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) { |
1487 | adapter->sge->espibug_skb = skb; | |
1488 | /* We want to re-use this skb later, so simply | |
1489 | * bump the reference count; the skb will then | |
1490 | * not be freed when the transmit completes. | |
1491 | */ | |
1492 | skb = skb_get(skb); | |
1493 | } | |
8199d3a7 | 1494 | } |
559fb51b SB |
1495 | |
1496 | cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); | |
8199d3a7 CL |
1497 | cpl->opcode = CPL_TX_PKT; |
1498 | cpl->ip_csum_dis = 1; /* SW calculates IP csum */ | |
1499 | cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; | |
1500 | /* the length field isn't used so don't bother setting it */ | |
559fb51b SB |
1501 | |
1502 | st->tx_cso += (skb->ip_summed == CHECKSUM_HW); | |
1503 | sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW); | |
1504 | sge->stats.tx_reg_pkts++; | |
8199d3a7 CL |
1505 | } |
1506 | cpl->iff = dev->if_port; | |
1507 | ||
1508 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
1509 | if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { | |
1510 | cpl->vlan_valid = 1; | |
1511 | cpl->vlan = htons(vlan_tx_tag_get(skb)); | |
559fb51b | 1512 | st->vlan_insert++; |
8199d3a7 CL |
1513 | } else |
1514 | #endif | |
1515 | cpl->vlan_valid = 0; | |
1516 | ||
1517 | dev->trans_start = jiffies; | |
559fb51b SB |
1518 | return t1_sge_tx(skb, adapter, 0, dev); |
1519 | } | |
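
/*
 * t1_start_xmit() is presumably installed as the device's transmit hook
 * at setup time, e.g. (hypothetical registration, not part of this file):
 *
 *	dev->hard_start_xmit = t1_start_xmit;
 */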
8199d3a7 | 1520 | |
559fb51b SB |
1521 | /* |
1522 | * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. | |
1523 | */ | |
1524 | static void sge_tx_reclaim_cb(unsigned long data) | |
1525 | { | |
1526 | int i; | |
1527 | struct sge *sge = (struct sge *)data; | |
1528 | ||
1529 | for (i = 0; i < SGE_CMDQ_N; ++i) { | |
1530 | struct cmdQ *q = &sge->cmdQ[i]; | |
1531 | ||
1532 | if (!spin_trylock(&q->lock)) | |
1533 | continue; | |
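		/*
		 * The queue lock is held elsewhere, most likely by the xmit
		 * path, which reclaims completed buffers itself; skip this
		 * queue and retry on the next timer tick.
		 */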
8199d3a7 | 1534 | |
559fb51b SB |
1535 | reclaim_completed_tx(sge, q); |
1536 | if (i == 0 && q->in_use) /* flush pending credits */ | |
1537 | writel(F_CMDQ0_ENABLE, | |
1538 | sge->adapter->regs + A_SG_DOORBELL); | |
8199d3a7 | 1539 | |
559fb51b SB |
1540 | spin_unlock(&q->lock); |
1541 | } | |
1542 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
1543 | } | |
1544 | ||
1545 | /* | |
1546 | * Propagate changes of the SGE coalescing parameters to the HW. | |
1547 | */ | |
1548 | int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) | |
1549 | { | |
1550 | sge->netdev->poll = t1_poll; | |
1551 | sge->fixed_intrtimer = p->rx_coalesce_usecs * | |
1552 | core_ticks_per_usec(sge->adapter); | |
1553 | writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); | |
8199d3a7 CL |
1554 | return 0; |
1555 | } | |
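
/*
 * Worked example, assuming a hypothetical 200 MHz core clock: with
 * rx_coalesce_usecs = 50, core_ticks_per_usec() gives 200, so
 * fixed_intrtimer = 50 * 200 = 10000 ticks is written to A_SG_INTRTIMER.
 */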
1556 | ||
559fb51b SB |
1557 | /* |
1558 | * Allocates both RX and TX resources and configures the SGE. However, | |
1559 | * the hardware is not enabled yet. | |
1560 | */ | |
1561 | int t1_sge_configure(struct sge *sge, struct sge_params *p) | |
8199d3a7 | 1562 | { |
559fb51b SB |
1563 | if (alloc_rx_resources(sge, p)) |
1564 | return -ENOMEM; | |
1565 | if (alloc_tx_resources(sge, p)) { | |
1566 | free_rx_resources(sge); | |
1567 | return -ENOMEM; | |
1568 | } | |
1569 | configure_sge(sge, p); | |
1570 | ||
1571 | /* | |
1572 | * Now that we have sized the free lists calculate the payload | |
1573 | * capacity of the large buffers. Other parts of the driver use | |
1574 | * this to set the max offload coalescing size so that RX packets | |
1575 | * do not overflow our large buffers. | |
1576 | */ | |
1577 | p->large_buf_capacity = jumbo_payload_capacity(sge); | |
1578 | return 0; | |
1579 | } | |
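
/*
 * A caller would typically check the return value before enabling the
 * engine, e.g. (hypothetical error handling, for illustration only):
 *
 *	if (t1_sge_configure(adapter->sge, &adapter->params.sge))
 *		goto err_free_sge;
 */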
8199d3a7 | 1580 | |
559fb51b SB |
1581 | /* |
1582 | * Disables the DMA engine. | |
1583 | */ | |
1584 | void t1_sge_stop(struct sge *sge) | |
1585 | { | |
1586 | writel(0, sge->adapter->regs + A_SG_CONTROL); | |
1587 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | |
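	/*
	 * The read-back forces the posted write above out to the chip
	 * before the timers are torn down below.
	 */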
1588 | if (is_T2(sge->adapter)) | |
1589 | del_timer_sync(&sge->espibug_timer); | |
1590 | del_timer_sync(&sge->tx_reclaim_timer); | |
8199d3a7 CL |
1591 | } |
1592 | ||
559fb51b SB |
1593 | /* |
1594 | * Enables the DMA engine. | |
1595 | */ | |
1596 | void t1_sge_start(struct sge *sge) | |
8199d3a7 | 1597 | { |
559fb51b SB |
1598 | refill_free_list(sge, &sge->freelQ[0]); |
1599 | refill_free_list(sge, &sge->freelQ[1]); | |
1600 | ||
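	/*
	 * Replenish both free lists before turning the SGE on so RX DMA
	 * never starts against empty buffer rings.
	 */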
1601 | writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); | |
1602 | doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); | |
1603 | (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ | |
1604 | ||
1605 | mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); | |
1606 | ||
1607 | if (is_T2(sge->adapter)) | |
1608 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | |
1609 | } | |
1610 | ||
1611 | /* | |
1612 | * Callback for the T2 ESPI 'stuck packet feature' workaround. | |
1613 | */ | |
1614 | static void espibug_workaround(void *data) | |
1615 | { | |
1616 | struct adapter *adapter = (struct adapter *)data; | |
8199d3a7 CL |
1617 | struct sge *sge = adapter->sge; |
1618 | ||
559fb51b SB |
1619 | if (netif_running(adapter->port[0].dev)) { |
1620 | struct sk_buff *skb = sge->espibug_skb; | |
1621 | ||
1622 | u32 seop = t1_espi_get_mon(adapter, 0x930, 0); | |
1623 | ||
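		/*
		 * When the SEOP monitor register shows the stuck-packet
		 * signature and a gratuitous-ARP skb was saved earlier,
		 * rewrite its MAC addresses with a Chelsio address (OUI
		 * 00:07:43) and retransmit it to flush the blocked ESPI
		 * pipeline; cb[0] marks the skb as already rewritten.
		 */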
1624 | if ((seop & 0xfff0fff) == 0xfff && skb) { | |
1625 | if (!skb->cb[0]) { | |
1626 | u8 ch_mac_addr[ETH_ALEN] = | |
1627 | {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; | |
1628 | memcpy(skb->data + sizeof(struct cpl_tx_pkt), | |
1629 | ch_mac_addr, ETH_ALEN); | |
1630 | memcpy(skb->data + skb->len - 10, ch_mac_addr, | |
1631 | ETH_ALEN); | |
1632 | skb->cb[0] = 0xff; | |
1633 | } | |
1634 | ||
1635 | /* Bump the reference count so the skb is not | |
1636 | * freed once the DMA has completed. | |
1637 | */ | |
1638 | skb = skb_get(skb); | |
1639 | t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); | |
1640 | } | |
1641 | } | |
1642 | mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); | |
8199d3a7 CL |
1643 | } |
1644 | ||
559fb51b SB |
1645 | /* |
1646 | * Creates a t1_sge structure and returns suggested resource parameters. | |
1647 | */ | |
1648 | struct sge * __devinit t1_sge_create(struct adapter *adapter, | |
1649 | struct sge_params *p) | |
1650 | { | |
1651 | struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL); | |
1652 | ||
1653 | if (!sge) | |
1654 | return NULL; | |
1655 | memset(sge, 0, sizeof(*sge)); | |
1656 | ||
1657 | sge->adapter = adapter; | |
1658 | sge->netdev = adapter->port[0].dev; | |
1659 | sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; | |
1660 | sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; | |
1661 | ||
1662 | init_timer(&sge->tx_reclaim_timer); | |
1663 | sge->tx_reclaim_timer.data = (unsigned long)sge; | |
1664 | sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; | |
1665 | ||
1666 | if (is_T2(sge->adapter)) { | |
1667 | init_timer(&sge->espibug_timer); | |
1668 | sge->espibug_timer.function = (void *)&espibug_workaround; | |
1669 | sge->espibug_timer.data = (unsigned long)sge->adapter; | |
1670 | sge->espibug_timeout = 1; | |
1671 | } | |
1672 | ||
1673 | ||
1674 | p->cmdQ_size[0] = SGE_CMDQ0_E_N; | |
1675 | p->cmdQ_size[1] = SGE_CMDQ1_E_N; | |
1676 | p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; | |
1677 | p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; | |
1678 | p->rx_coalesce_usecs = 50; | |
1679 | p->coalesce_enable = 0; | |
1680 | p->sample_interval_usecs = 0; | |
1681 | p->polling = 0; | |
1682 | ||
1683 | return sge; | |
1684 | } |
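
/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 *
 *	adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
 *	if (!adapter->sge)
 *		return -ENOMEM;
 */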