Commit | Line | Data |
---|---|---|
cfb739b4 GKH |
1 | /* |
2 | * Agere Systems Inc. | |
3 | * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs | |
4 | * | |
64f93036 | 5 | * Copyright © 2005 Agere Systems Inc. |
cfb739b4 GKH |
6 | * All rights reserved. |
7 | * http://www.agere.com | |
8 | * | |
9 | *------------------------------------------------------------------------------ | |
10 | * | |
11 | * et1310_rx.c - Routines used to perform data reception | |
12 | * | |
13 | *------------------------------------------------------------------------------ | |
14 | * | |
15 | * SOFTWARE LICENSE | |
16 | * | |
17 | * This software is provided subject to the following terms and conditions, | |
18 | * which you should read carefully before using the software. Using this | |
19 | * software indicates your acceptance of these terms and conditions. If you do | |
20 | * not agree with these terms and conditions, do not use the software. | |
21 | * | |
64f93036 | 22 | * Copyright © 2005 Agere Systems Inc. |
cfb739b4 GKH |
23 | * All rights reserved. |
24 | * | |
25 | * Redistribution and use in source or binary forms, with or without | |
26 | * modifications, are permitted provided that the following conditions are met: | |
27 | * | |
28 | * . Redistributions of source code must retain the above copyright notice, this | |
29 | * list of conditions and the following Disclaimer as comments in the code as | |
30 | * well as in the documentation and/or other materials provided with the | |
31 | * distribution. | |
32 | * | |
33 | * . Redistributions in binary form must reproduce the above copyright notice, | |
34 | * this list of conditions and the following Disclaimer in the documentation | |
35 | * and/or other materials provided with the distribution. | |
36 | * | |
37 | * . Neither the name of Agere Systems Inc. nor the names of the contributors | |
38 | * may be used to endorse or promote products derived from this software | |
39 | * without specific prior written permission. | |
40 | * | |
41 | * Disclaimer | |
42 | * | |
64f93036 | 43 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
cfb739b4 GKH |
44 | * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF |
45 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY | |
46 | * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN | |
47 | * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY | |
48 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
49 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
50 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | |
51 | * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT | |
52 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | |
53 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH | |
54 | * DAMAGE. | |
55 | * | |
56 | */ | |
57 | ||
58 | #include "et131x_version.h" | |
cfb739b4 GKH |
59 | #include "et131x_defs.h" |
60 | ||
61 | #include <linux/pci.h> | |
62 | #include <linux/init.h> | |
63 | #include <linux/module.h> | |
64 | #include <linux/types.h> | |
65 | #include <linux/kernel.h> | |
66 | ||
67 | #include <linux/sched.h> | |
68 | #include <linux/ptrace.h> | |
69 | #include <linux/slab.h> | |
70 | #include <linux/ctype.h> | |
71 | #include <linux/string.h> | |
72 | #include <linux/timer.h> | |
73 | #include <linux/interrupt.h> | |
74 | #include <linux/in.h> | |
75 | #include <linux/delay.h> | |
64f93036 AC |
76 | #include <linux/io.h> |
77 | #include <linux/bitops.h> | |
cfb739b4 | 78 | #include <asm/system.h> |
cfb739b4 GKH |
79 | |
80 | #include <linux/netdevice.h> | |
81 | #include <linux/etherdevice.h> | |
82 | #include <linux/skbuff.h> | |
83 | #include <linux/if_arp.h> | |
84 | #include <linux/ioport.h> | |
85 | ||
86 | #include "et1310_phy.h" | |
87 | #include "et1310_pm.h" | |
88 | #include "et1310_jagcore.h" | |
89 | ||
90 | #include "et131x_adapter.h" | |
91 | #include "et131x_initpci.h" | |
92 | ||
93 | #include "et1310_rx.h" | |
94 | ||
cfb739b4 | 95 | |
25ad00bb | 96 | void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd); |
cfb739b4 GKH |
97 | |
98 | /** | |
99 | * et131x_rx_dma_memory_alloc | |
100 | * @adapter: pointer to our private adapter structure | |
101 | * | |
102 | * Returns 0 on success and errno on failure (as defined in errno.h) | |
103 | * | |
104 | * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, | |
105 | * and the Packet Status Ring. | |
106 | */ | |
107 | int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) | |
108 | { | |
4fbdf811 AC |
109 | u32 i, j; |
110 | u32 bufsize; | |
111 | u32 pktStatRingSize, FBRChunkSize; | |
cfb739b4 GKH |
112 | RX_RING_t *rx_ring; |
113 | ||
cfb739b4 | 114 | /* Setup some convenience pointers */ |
64f93036 | 115 | rx_ring = (RX_RING_t *) &adapter->RxRing; |
cfb739b4 GKH |
116 | |
117 | /* Alloc memory for the lookup table */ | |
118 | #ifdef USE_FBR0 | |
119 | rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL); | |
120 | #endif | |
121 | ||
122 | rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL); | |
123 | ||
124 | /* The first thing we will do is configure the sizes of the buffer | |
125 | * rings. These will change based on jumbo packet support. Larger | |
126 | * jumbo packets increases the size of each entry in FBR0, and the | |
127 | * number of entries in FBR0, while at the same time decreasing the | |
128 | * number of entries in FBR1. | |
129 | * | |
130 | * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 | |
131 | * entries are huge in order to accomodate a "jumbo" frame, then it | |
132 | * will have less entries. Conversely, FBR1 will now be relied upon | |
133 | * to carry more "normal" frames, thus it's entry size also increases | |
134 | * and the number of entries goes up too (since it now carries | |
135 | * "small" + "regular" packets. | |
136 | * | |
137 | * In this scheme, we try to maintain 512 entries between the two | |
138 | * rings. Also, FBR1 remains a constant size - when it's size doubles | |
139 | * the number of entries halves. FBR0 increases in size, however. | |
140 | */ | |
141 | ||
142 | if (adapter->RegistryJumboPacket < 2048) { | |
143 | #ifdef USE_FBR0 | |
144 | rx_ring->Fbr0BufferSize = 256; | |
145 | rx_ring->Fbr0NumEntries = 512; | |
146 | #endif | |
147 | rx_ring->Fbr1BufferSize = 2048; | |
148 | rx_ring->Fbr1NumEntries = 512; | |
149 | } else if (adapter->RegistryJumboPacket < 4096) { | |
150 | #ifdef USE_FBR0 | |
151 | rx_ring->Fbr0BufferSize = 512; | |
152 | rx_ring->Fbr0NumEntries = 1024; | |
153 | #endif | |
154 | rx_ring->Fbr1BufferSize = 4096; | |
155 | rx_ring->Fbr1NumEntries = 512; | |
156 | } else { | |
157 | #ifdef USE_FBR0 | |
158 | rx_ring->Fbr0BufferSize = 1024; | |
159 | rx_ring->Fbr0NumEntries = 768; | |
160 | #endif | |
161 | rx_ring->Fbr1BufferSize = 16384; | |
162 | rx_ring->Fbr1NumEntries = 128; | |
163 | } | |
164 | ||
165 | #ifdef USE_FBR0 | |
166 | adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries + | |
167 | adapter->RxRing.Fbr1NumEntries; | |
168 | #else | |
169 | adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries; | |
170 | #endif | |
171 | ||
172 | /* Allocate an area of memory for Free Buffer Ring 1 */ | |
173 | bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff; | |
174 | rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev, | |
175 | bufsize, | |
176 | &rx_ring->pFbr1RingPa); | |
177 | if (!rx_ring->pFbr1RingVa) { | |
15700039 | 178 | dev_err(&adapter->pdev->dev, |
cfb739b4 | 179 | "Cannot alloc memory for Free Buffer Ring 1\n"); |
cfb739b4 GKH |
180 | return -ENOMEM; |
181 | } | |
182 | ||
183 | /* Save physical address | |
184 | * | |
185 | * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, | |
186 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses | |
187 | * are ever returned, make sure the high part is retrieved here | |
188 | * before storing the adjusted address. | |
189 | */ | |
190 | rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa; | |
191 | ||
192 | /* Align Free Buffer Ring 1 on a 4K boundary */ | |
193 | et131x_align_allocated_memory(adapter, | |
194 | &rx_ring->Fbr1Realpa, | |
195 | &rx_ring->Fbr1offset, 0x0FFF); | |
196 | ||
197 | rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa + | |
198 | rx_ring->Fbr1offset); | |
199 | ||
200 | #ifdef USE_FBR0 | |
201 | /* Allocate an area of memory for Free Buffer Ring 0 */ | |
202 | bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff; | |
203 | rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev, | |
204 | bufsize, | |
205 | &rx_ring->pFbr0RingPa); | |
206 | if (!rx_ring->pFbr0RingVa) { | |
15700039 | 207 | dev_err(&adapter->pdev->dev, |
cfb739b4 | 208 | "Cannot alloc memory for Free Buffer Ring 0\n"); |
cfb739b4 GKH |
209 | return -ENOMEM; |
210 | } | |
211 | ||
212 | /* Save physical address | |
213 | * | |
214 | * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, | |
215 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses | |
216 | * are ever returned, make sure the high part is retrieved here before | |
217 | * storing the adjusted address. | |
218 | */ | |
219 | rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa; | |
220 | ||
221 | /* Align Free Buffer Ring 0 on a 4K boundary */ | |
222 | et131x_align_allocated_memory(adapter, | |
223 | &rx_ring->Fbr0Realpa, | |
224 | &rx_ring->Fbr0offset, 0x0FFF); | |
225 | ||
226 | rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa + | |
227 | rx_ring->Fbr0offset); | |
228 | #endif | |
229 | ||
4fbdf811 AC |
230 | for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS); |
231 | i++) { | |
232 | u64 Fbr1Offset; | |
233 | u64 Fbr1TempPa; | |
234 | u32 Fbr1Align; | |
cfb739b4 GKH |
235 | |
236 | /* This code allocates an area of memory big enough for N | |
237 | * free buffers + (buffer_size - 1) so that the buffers can | |
238 | * be aligned on 4k boundaries. If each buffer were aligned | |
239 | * to a buffer_size boundary, the effect would be to double | |
240 | * the size of FBR0. By allocating N buffers at once, we | |
241 | * reduce this overhead. | |
242 | */ | |
64f93036 | 243 | if (rx_ring->Fbr1BufferSize > 4096) |
cfb739b4 | 244 | Fbr1Align = 4096; |
64f93036 | 245 | else |
cfb739b4 | 246 | Fbr1Align = rx_ring->Fbr1BufferSize; |
cfb739b4 GKH |
247 | |
248 | FBRChunkSize = | |
249 | (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1; | |
4fbdf811 | 250 | rx_ring->Fbr1MemVa[i] = |
cfb739b4 | 251 | pci_alloc_consistent(adapter->pdev, FBRChunkSize, |
4fbdf811 | 252 | &rx_ring->Fbr1MemPa[i]); |
cfb739b4 | 253 | |
4fbdf811 | 254 | if (!rx_ring->Fbr1MemVa[i]) { |
15700039 AC |
255 | dev_err(&adapter->pdev->dev, |
256 | "Could not alloc memory\n"); | |
cfb739b4 GKH |
257 | return -ENOMEM; |
258 | } | |
259 | ||
260 | /* See NOTE in "Save Physical Address" comment above */ | |
4fbdf811 | 261 | Fbr1TempPa = rx_ring->Fbr1MemPa[i]; |
cfb739b4 GKH |
262 | |
263 | et131x_align_allocated_memory(adapter, | |
264 | &Fbr1TempPa, | |
265 | &Fbr1Offset, (Fbr1Align - 1)); | |
266 | ||
4fbdf811 AC |
267 | for (j = 0; j < FBR_CHUNKS; j++) { |
268 | u32 index = (i * FBR_CHUNKS) + j; | |
cfb739b4 GKH |
269 | |
270 | /* Save the Virtual address of this index for quick | |
271 | * access later | |
272 | */ | |
273 | rx_ring->Fbr[1]->Va[index] = | |
4fbdf811 AC |
274 | (uint8_t *) rx_ring->Fbr1MemVa[i] + |
275 | (j * rx_ring->Fbr1BufferSize) + Fbr1Offset; | |
cfb739b4 GKH |
276 | |
277 | /* now store the physical address in the descriptor | |
278 | * so the device can access it | |
279 | */ | |
280 | rx_ring->Fbr[1]->PAHigh[index] = | |
4fbdf811 AC |
281 | (u32) (Fbr1TempPa >> 32); |
282 | rx_ring->Fbr[1]->PALow[index] = (u32) Fbr1TempPa; | |
cfb739b4 GKH |
283 | |
284 | Fbr1TempPa += rx_ring->Fbr1BufferSize; | |
285 | ||
286 | rx_ring->Fbr[1]->Buffer1[index] = | |
287 | rx_ring->Fbr[1]->Va[index]; | |
288 | rx_ring->Fbr[1]->Buffer2[index] = | |
289 | rx_ring->Fbr[1]->Va[index] - 4; | |
290 | } | |
291 | } | |
292 | ||
293 | #ifdef USE_FBR0 | |
294 | /* Same for FBR0 (if in use) */ | |
4fbdf811 AC |
295 | for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS); |
296 | i++) { | |
297 | u64 Fbr0Offset; | |
298 | u64 Fbr0TempPa; | |
cfb739b4 GKH |
299 | |
300 | FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1; | |
4fbdf811 | 301 | rx_ring->Fbr0MemVa[i] = |
cfb739b4 | 302 | pci_alloc_consistent(adapter->pdev, FBRChunkSize, |
4fbdf811 | 303 | &rx_ring->Fbr0MemPa[i]); |
cfb739b4 | 304 | |
4fbdf811 | 305 | if (!rx_ring->Fbr0MemVa[i]) { |
15700039 AC |
306 | dev_err(&adapter->pdev->dev, |
307 | "Could not alloc memory\n"); | |
cfb739b4 GKH |
308 | return -ENOMEM; |
309 | } | |
310 | ||
311 | /* See NOTE in "Save Physical Address" comment above */ | |
4fbdf811 | 312 | Fbr0TempPa = rx_ring->Fbr0MemPa[i]; |
cfb739b4 GKH |
313 | |
314 | et131x_align_allocated_memory(adapter, | |
315 | &Fbr0TempPa, | |
316 | &Fbr0Offset, | |
317 | rx_ring->Fbr0BufferSize - 1); | |
318 | ||
4fbdf811 AC |
319 | for (j = 0; j < FBR_CHUNKS; j++) { |
320 | u32 index = (i * FBR_CHUNKS) + j; | |
cfb739b4 GKH |
321 | |
322 | rx_ring->Fbr[0]->Va[index] = | |
4fbdf811 AC |
323 | (uint8_t *) rx_ring->Fbr0MemVa[i] + |
324 | (j * rx_ring->Fbr0BufferSize) + Fbr0Offset; | |
cfb739b4 GKH |
325 | |
326 | rx_ring->Fbr[0]->PAHigh[index] = | |
4fbdf811 AC |
327 | (u32) (Fbr0TempPa >> 32); |
328 | rx_ring->Fbr[0]->PALow[index] = (u32) Fbr0TempPa; | |
cfb739b4 GKH |
329 | |
330 | Fbr0TempPa += rx_ring->Fbr0BufferSize; | |
331 | ||
332 | rx_ring->Fbr[0]->Buffer1[index] = | |
333 | rx_ring->Fbr[0]->Va[index]; | |
334 | rx_ring->Fbr[0]->Buffer2[index] = | |
335 | rx_ring->Fbr[0]->Va[index] - 4; | |
336 | } | |
337 | } | |
338 | #endif | |
339 | ||
340 | /* Allocate an area of memory for FIFO of Packet Status ring entries */ | |
341 | pktStatRingSize = | |
342 | sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries; | |
343 | ||
344 | rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev, | |
9c60684b | 345 | pktStatRingSize, |
cfb739b4 GKH |
346 | &rx_ring->pPSRingPa); |
347 | ||
348 | if (!rx_ring->pPSRingVa) { | |
15700039 | 349 | dev_err(&adapter->pdev->dev, |
cfb739b4 | 350 | "Cannot alloc memory for Packet Status Ring\n"); |
cfb739b4 GKH |
351 | return -ENOMEM; |
352 | } | |
9c60684b | 353 | printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa); |
cfb739b4 | 354 | |
9c60684b | 355 | /* |
cfb739b4 GKH |
356 | * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, |
357 | * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses | |
358 | * are ever returned, make sure the high part is retrieved here before | |
359 | * storing the adjusted address. | |
360 | */ | |
cfb739b4 GKH |
361 | |
362 | /* Allocate an area of memory for writeback of status information */ | |
363 | rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev, | |
9c60684b AC |
364 | sizeof(RX_STATUS_BLOCK_t), |
365 | &rx_ring->pRxStatusPa); | |
cfb739b4 | 366 | if (!rx_ring->pRxStatusVa) { |
15700039 | 367 | dev_err(&adapter->pdev->dev, |
cfb739b4 | 368 | "Cannot alloc memory for Status Block\n"); |
cfb739b4 GKH |
369 | return -ENOMEM; |
370 | } | |
cfb739b4 | 371 | rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD; |
9c60684b | 372 | printk("PRS %lx\n", (unsigned long)rx_ring->pRxStatusPa); |
cfb739b4 GKH |
373 | |
374 | /* Recv | |
375 | * pci_pool_create initializes a lookaside list. After successful | |
376 | * creation, nonpaged fixed-size blocks can be allocated from and | |
377 | * freed to the lookaside list. | |
378 | * RFDs will be allocated from this pool. | |
379 | */ | |
380 | rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name, | |
381 | sizeof(MP_RFD), | |
382 | 0, | |
383 | SLAB_CACHE_DMA | | |
384 | SLAB_HWCACHE_ALIGN, | |
385 | NULL); | |
386 | ||
f6b35d66 | 387 | adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE; |
cfb739b4 GKH |
388 | |
389 | /* The RFDs are going to be put on lists later on, so initialize the | |
390 | * lists now. | |
391 | */ | |
392 | INIT_LIST_HEAD(&rx_ring->RecvList); | |
cfb739b4 GKH |
393 | return 0; |
394 | } | |
395 | ||
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Tears down everything et131x_rx_dma_memory_alloc() set up, in reverse:
 * RFDs on the RecvList, both free-buffer rings (chunk buffers first, then
 * the descriptor FIFOs), the packet status ring, the status writeback
 * block, the RFD lookaside cache and the FBR lookup tables.  Safe to call
 * on a partially-initialized ring (every free is guarded by a NULL check),
 * which is how the alloc path's error returns are cleaned up.
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	PMP_RFD rfd;
	RX_RING_t *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Free RFDs and associated packet descriptors; every RFD should be
	 * back on the RecvList by now (none outstanding in hardware)
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					    MP_RFD, list_node);

		list_del(&rfd->list_node);
		rfd->Packet = NULL;
		kmem_cache_free(adapter->RxRing.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Must recompute the same alignment (and
				 * hence the same chunk size) that the alloc
				 * path used, or the free size is wrong
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself; undo the 4K-alignment offset the
		 * alloc path added so we free the original pointer
		 */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *)
			rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself (undo the alignment offset as above) */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *)
			rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->pRxStatusVa) {
		pci_free_consistent(adapter->pdev,
				    sizeof(RX_STATUS_BLOCK_t),
				    rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool; the flag tells us whether the
	 * cache was ever successfully created
	 */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table (kfree(NULL) is a no-op) */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
538 | ||
539 | /** | |
540 | * et131x_init_recv - Initialize receive data structures. | |
541 | * @adapter: pointer to our private adapter structure | |
542 | * | |
543 | * Returns 0 on success and errno on failure (as defined in errno.h) | |
544 | */ | |
545 | int et131x_init_recv(struct et131x_adapter *adapter) | |
546 | { | |
547 | int status = -ENOMEM; | |
4fbdf811 AC |
548 | PMP_RFD rfd = NULL; |
549 | u32 rfdct; | |
550 | u32 numrfd = 0; | |
cfb739b4 GKH |
551 | RX_RING_t *rx_ring = NULL; |
552 | ||
cfb739b4 | 553 | /* Setup some convenience pointers */ |
64f93036 | 554 | rx_ring = (RX_RING_t *) &adapter->RxRing; |
cfb739b4 GKH |
555 | |
556 | /* Setup each RFD */ | |
4fbdf811 AC |
557 | for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) { |
558 | rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside, | |
cfb739b4 GKH |
559 | GFP_ATOMIC | GFP_DMA); |
560 | ||
4fbdf811 | 561 | if (!rfd) { |
15700039 | 562 | dev_err(&adapter->pdev->dev, |
cfb739b4 GKH |
563 | "Couldn't alloc RFD out of kmem_cache\n"); |
564 | status = -ENOMEM; | |
565 | continue; | |
566 | } | |
567 | ||
f432c55e | 568 | rfd->Packet = NULL; |
cfb739b4 GKH |
569 | |
570 | /* Add this RFD to the RecvList */ | |
4fbdf811 | 571 | list_add_tail(&rfd->list_node, &rx_ring->RecvList); |
cfb739b4 GKH |
572 | |
573 | /* Increment both the available RFD's, and the total RFD's. */ | |
574 | rx_ring->nReadyRecv++; | |
4fbdf811 | 575 | numrfd++; |
cfb739b4 GKH |
576 | } |
577 | ||
4fbdf811 | 578 | if (numrfd > NIC_MIN_NUM_RFD) |
cfb739b4 | 579 | status = 0; |
cfb739b4 | 580 | |
4fbdf811 | 581 | rx_ring->NumRfd = numrfd; |
cfb739b4 GKH |
582 | |
583 | if (status != 0) { | |
4fbdf811 | 584 | kmem_cache_free(rx_ring->RecvLookaside, rfd); |
15700039 | 585 | dev_err(&adapter->pdev->dev, |
cfb739b4 GKH |
586 | "Allocation problems in et131x_init_recv\n"); |
587 | } | |
cfb739b4 GKH |
588 | return status; |
589 | } | |
590 | ||
cfb739b4 GKH |
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 *
 * Halts the RX DMA engine, then programs the ET1310's RX DMA registers:
 * the status writeback block, the packet status ring (PSR), and free
 * buffer ring 1 (plus ring 0 when USE_FBR0), followed by the interrupt
 * coalescing parameters.  The caller is expected to re-enable RX DMA
 * afterwards (et131x_rx_dma_enable).  All memory used here was allocated
 * by et131x_rx_dma_memory_alloc().
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	PFBR_DESC_t fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure. */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->pRxStatusPa >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->pRxStatusPa, &rx_dma->dma_wb_base_lo);

	/* Clear the status block so stale entries aren't mistaken for new */
	memset(rx_local->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Low-water mark: interrupt when the PSR drops to this percentage */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (PFBR_DESC_t) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[1]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[1]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (PFBR_DESC_t) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[0]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[0]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
701 | ||
702 | /** | |
703 | * SetRxDmaTimer - Set the heartbeat timer according to line rate. | |
25ad00bb | 704 | * @etdev: pointer to our adapter structure |
cfb739b4 | 705 | */ |
25ad00bb | 706 | void SetRxDmaTimer(struct et131x_adapter *etdev) |
cfb739b4 GKH |
707 | { |
708 | /* For version B silicon, we do not use the RxDMA timer for 10 and 100 | |
709 | * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. | |
710 | */ | |
9fa81099 AC |
711 | if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) || |
712 | (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) { | |
67947125 | 713 | writel(0, &etdev->regs->rxdma.max_pkt_time); |
2e5e0b89 | 714 | writel(1, &etdev->regs->rxdma.num_pkt_done); |
cfb739b4 GKH |
715 | } |
716 | } | |
717 | ||
718 | /** | |
719 | * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 | |
25ad00bb | 720 | * @etdev: pointer to our adapter structure |
cfb739b4 | 721 | */ |
25ad00bb | 722 | void et131x_rx_dma_disable(struct et131x_adapter *etdev) |
cfb739b4 GKH |
723 | { |
724 | RXDMA_CSR_t csr; | |
725 | ||
cfb739b4 | 726 | /* Setup the receive dma configuration register */ |
f3f415a3 AC |
727 | writel(0x00002001, &etdev->regs->rxdma.csr.value); |
728 | csr.value = readl(&etdev->regs->rxdma.csr.value); | |
cfb739b4 GKH |
729 | if (csr.bits.halt_status != 1) { |
730 | udelay(5); | |
f3f415a3 | 731 | csr.value = readl(&etdev->regs->rxdma.csr.value); |
64f93036 | 732 | if (csr.bits.halt_status != 1) |
15700039 | 733 | dev_err(&etdev->pdev->dev, |
64f93036 AC |
734 | "RX Dma failed to enter halt state. CSR 0x%08x\n", |
735 | csr.value); | |
cfb739b4 | 736 | } |
cfb739b4 GKH |
737 | } |
738 | ||
739 | /** | |
740 | * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. | |
25ad00bb | 741 | * @etdev: pointer to our adapter structure |
cfb739b4 | 742 | */ |
25ad00bb | 743 | void et131x_rx_dma_enable(struct et131x_adapter *etdev) |
cfb739b4 | 744 | { |
cfb739b4 | 745 | /* Setup the receive dma configuration register for normal operation */ |
5f1377d4 AC |
746 | RXDMA_CSR_t csr = { 0 }; |
747 | ||
748 | csr.bits.fbr1_enable = 1; | |
749 | if (etdev->RxRing.Fbr1BufferSize == 4096) | |
750 | csr.bits.fbr1_size = 1; | |
751 | else if (etdev->RxRing.Fbr1BufferSize == 8192) | |
752 | csr.bits.fbr1_size = 2; | |
753 | else if (etdev->RxRing.Fbr1BufferSize == 16384) | |
754 | csr.bits.fbr1_size = 3; | |
cfb739b4 | 755 | #ifdef USE_FBR0 |
5f1377d4 AC |
756 | csr.bits.fbr0_enable = 1; |
757 | if (etdev->RxRing.Fbr0BufferSize == 256) | |
758 | csr.bits.fbr0_size = 1; | |
759 | else if (etdev->RxRing.Fbr0BufferSize == 512) | |
760 | csr.bits.fbr0_size = 2; | |
761 | else if (etdev->RxRing.Fbr0BufferSize == 1024) | |
762 | csr.bits.fbr0_size = 3; | |
cfb739b4 | 763 | #endif |
5f1377d4 | 764 | writel(csr.value, &etdev->regs->rxdma.csr.value); |
cfb739b4 | 765 | |
5f1377d4 AC |
766 | csr.value = readl(&etdev->regs->rxdma.csr.value); |
767 | if (csr.bits.halt_status != 0) { | |
768 | udelay(5); | |
f3f415a3 | 769 | csr.value = readl(&etdev->regs->rxdma.csr.value); |
cfb739b4 | 770 | if (csr.bits.halt_status != 0) { |
5f1377d4 AC |
771 | dev_err(&etdev->pdev->dev, |
772 | "RX Dma failed to exit halt state. CSR 0x%08x\n", | |
773 | csr.value); | |
cfb739b4 GKH |
774 | } |
775 | } | |
cfb739b4 GKH |
776 | } |
777 | ||
778 | /** | |
779 | * nic_rx_pkts - Checks the hardware for available packets | |
25ad00bb | 780 | * @etdev: pointer to our adapter |
cfb739b4 | 781 | * |
4fbdf811 | 782 | * Returns rfd, a pointer to our MPRFD. |
cfb739b4 GKH |
783 | * |
784 | * Checks the hardware for available packets, using completion ring | |
785 | * If packets are available, it gets an RFD from the RecvList, attaches | |
786 | * the packet to it, puts the RFD in the RecvPendList, and also returns | |
787 | * the pointer to the RFD. | |
788 | */ | |
25ad00bb | 789 | PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev) |
cfb739b4 | 790 | { |
4fbdf811 AC |
791 | struct _rx_ring_t *rx_local = &etdev->RxRing; |
792 | PRX_STATUS_BLOCK_t status; | |
793 | PPKT_STAT_DESC_t psr; | |
794 | PMP_RFD rfd; | |
795 | u32 i; | |
796 | uint8_t *buf; | |
37628606 | 797 | unsigned long flags; |
cfb739b4 | 798 | struct list_head *element; |
4fbdf811 AC |
799 | uint8_t rindex; |
800 | uint16_t bindex; | |
801 | u32 len; | |
cfb739b4 GKH |
802 | PKT_STAT_DESC_WORD0_t Word0; |
803 | ||
cfb739b4 GKH |
804 | /* RX Status block is written by the DMA engine prior to every |
805 | * interrupt. It contains the next to be used entry in the Packet | |
806 | * Status Ring, and also the two Free Buffer rings. | |
807 | */ | |
4fbdf811 | 808 | status = (PRX_STATUS_BLOCK_t) rx_local->pRxStatusVa; |
cfb739b4 | 809 | |
99fd99f6 | 810 | /* FIXME: tidy later when conversions complete */ |
4fbdf811 | 811 | if (status->Word1.bits.PSRoffset == |
99fd99f6 | 812 | (rx_local->local_psr_full & 0xFFF) && |
4fbdf811 | 813 | status->Word1.bits.PSRwrap == |
99fd99f6 | 814 | ((rx_local->local_psr_full >> 12) & 1)) { |
cfb739b4 | 815 | /* Looks like this ring is not updated yet */ |
cfb739b4 GKH |
816 | return NULL; |
817 | } | |
818 | ||
819 | /* The packet status ring indicates that data is available. */ | |
4fbdf811 | 820 | psr = (PPKT_STAT_DESC_t) (rx_local->pPSRingVa) + |
99fd99f6 | 821 | (rx_local->local_psr_full & 0xFFF); |
cfb739b4 GKH |
822 | |
823 | /* Grab any information that is required once the PSR is | |
824 | * advanced, since we can no longer rely on the memory being | |
825 | * accurate | |
826 | */ | |
4fbdf811 AC |
827 | len = psr->word1.bits.length; |
828 | rindex = (uint8_t) psr->word1.bits.ri; | |
829 | bindex = (uint16_t) psr->word1.bits.bi; | |
830 | Word0 = psr->word0; | |
cfb739b4 | 831 | |
cfb739b4 | 832 | /* Indicate that we have used this PSR entry. */ |
99fd99f6 | 833 | /* FIXME wrap 12 */ |
d31a2ff0 AC |
834 | add_12bit(&rx_local->local_psr_full, 1); |
835 | if ((rx_local->local_psr_full & 0xFFF) > rx_local->PsrNumEntries - 1) { | |
99fd99f6 | 836 | /* Clear psr full and toggle the wrap bit */ |
d31a2ff0 | 837 | rx_local->local_psr_full &= ~0xFFF; |
99fd99f6 | 838 | rx_local->local_psr_full ^= 0x1000; |
cfb739b4 GKH |
839 | } |
840 | ||
99fd99f6 AC |
841 | writel(rx_local->local_psr_full, |
842 | &etdev->regs->rxdma.psr_full_offset); | |
cfb739b4 GKH |
843 | |
844 | #ifndef USE_FBR0 | |
4fbdf811 | 845 | if (rindex != 1) { |
cfb739b4 GKH |
846 | return NULL; |
847 | } | |
848 | #endif | |
849 | ||
850 | #ifdef USE_FBR0 | |
4fbdf811 AC |
851 | if (rindex > 1 || |
852 | (rindex == 0 && | |
853 | bindex > rx_local->Fbr0NumEntries - 1) || | |
854 | (rindex == 1 && | |
855 | bindex > rx_local->Fbr1NumEntries - 1)) | |
cfb739b4 | 856 | #else |
4fbdf811 AC |
857 | if (rindex != 1 || |
858 | bindex > rx_local->Fbr1NumEntries - 1) | |
cfb739b4 GKH |
859 | #endif |
860 | { | |
861 | /* Illegal buffer or ring index cannot be used by S/W*/ | |
15700039 | 862 | dev_err(&etdev->pdev->dev, |
cfb739b4 GKH |
863 | "NICRxPkts PSR Entry %d indicates " |
864 | "length of %d and/or bad bi(%d)\n", | |
99fd99f6 | 865 | rx_local->local_psr_full & 0xFFF, |
4fbdf811 | 866 | len, bindex); |
cfb739b4 GKH |
867 | return NULL; |
868 | } | |
869 | ||
870 | /* Get and fill the RFD. */ | |
37628606 | 871 | spin_lock_irqsave(&etdev->RcvLock, flags); |
cfb739b4 | 872 | |
4fbdf811 AC |
873 | rfd = NULL; |
874 | element = rx_local->RecvList.next; | |
875 | rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node); | |
cfb739b4 | 876 | |
4fbdf811 | 877 | if (rfd == NULL) { |
37628606 | 878 | spin_unlock_irqrestore(&etdev->RcvLock, flags); |
cfb739b4 GKH |
879 | return NULL; |
880 | } | |
881 | ||
4fbdf811 AC |
882 | list_del(&rfd->list_node); |
883 | rx_local->nReadyRecv--; | |
cfb739b4 | 884 | |
37628606 | 885 | spin_unlock_irqrestore(&etdev->RcvLock, flags); |
cfb739b4 | 886 | |
4fbdf811 AC |
887 | rfd->bufferindex = bindex; |
888 | rfd->ringindex = rindex; | |
cfb739b4 GKH |
889 | |
890 | /* In V1 silicon, there is a bug which screws up filtering of | |
891 | * runt packets. Therefore runt packet filtering is disabled | |
892 | * in the MAC and the packets are dropped here. They are | |
893 | * also counted here. | |
894 | */ | |
4fbdf811 | 895 | if (len < (NIC_MIN_PACKET_SIZE + 4)) { |
25ad00bb | 896 | etdev->Stats.other_errors++; |
4fbdf811 | 897 | len = 0; |
cfb739b4 GKH |
898 | } |
899 | ||
4fbdf811 | 900 | if (len) { |
25ad00bb | 901 | if (etdev->ReplicaPhyLoopbk == 1) { |
4fbdf811 | 902 | buf = rx_local->Fbr[rindex]->Va[bindex]; |
cfb739b4 | 903 | |
4fbdf811 | 904 | if (memcmp(&buf[6], &etdev->CurrentAddress[0], |
cfb739b4 | 905 | ETH_ALEN) == 0) { |
4fbdf811 | 906 | if (memcmp(&buf[42], "Replica packet", |
cfb739b4 | 907 | ETH_HLEN)) { |
25ad00bb | 908 | etdev->ReplicaPhyLoopbkPF = 1; |
cfb739b4 GKH |
909 | } |
910 | } | |
cfb739b4 GKH |
911 | } |
912 | ||
913 | /* Determine if this is a multicast packet coming in */ | |
914 | if ((Word0.value & ALCATEL_MULTICAST_PKT) && | |
915 | !(Word0.value & ALCATEL_BROADCAST_PKT)) { | |
916 | /* Promiscuous mode and Multicast mode are | |
917 | * not mutually exclusive as was first | |
918 | * thought. I guess Promiscuous is just | |
919 | * considered a super-set of the other | |
920 | * filters. Generally filter is 0x2b when in | |
921 | * promiscuous mode. | |
922 | */ | |
25ad00bb AC |
923 | if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST) |
924 | && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS) | |
925 | && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { | |
4fbdf811 AC |
926 | buf = rx_local->Fbr[rindex]-> |
927 | Va[bindex]; | |
cfb739b4 GKH |
928 | |
929 | /* Loop through our list to see if the | |
930 | * destination address of this packet | |
931 | * matches one in our list. | |
932 | */ | |
4fbdf811 AC |
933 | for (i = 0; |
934 | i < etdev->MCAddressCount; | |
935 | i++) { | |
936 | if (buf[0] == | |
937 | etdev->MCList[i][0] | |
938 | && buf[1] == | |
939 | etdev->MCList[i][1] | |
940 | && buf[2] == | |
941 | etdev->MCList[i][2] | |
942 | && buf[3] == | |
943 | etdev->MCList[i][3] | |
944 | && buf[4] == | |
945 | etdev->MCList[i][4] | |
946 | && buf[5] == | |
947 | etdev->MCList[i][5]) { | |
cfb739b4 GKH |
948 | break; |
949 | } | |
950 | } | |
951 | ||
952 | /* If our index is equal to the number | |
953 | * of Multicast address we have, then | |
954 | * this means we did not find this | |
955 | * packet's matching address in our | |
956 | * list. Set the PacketSize to zero, | |
957 | * so we free our RFD when we return | |
958 | * from this function. | |
959 | */ | |
4fbdf811 AC |
960 | if (i == etdev->MCAddressCount) |
961 | len = 0; | |
cfb739b4 GKH |
962 | } |
963 | ||
4fbdf811 | 964 | if (len > 0) |
25ad00bb | 965 | etdev->Stats.multircv++; |
64f93036 | 966 | } else if (Word0.value & ALCATEL_BROADCAST_PKT) |
25ad00bb | 967 | etdev->Stats.brdcstrcv++; |
64f93036 | 968 | else |
cfb739b4 GKH |
969 | /* Not sure what this counter measures in |
970 | * promiscuous mode. Perhaps we should check | |
971 | * the MAC address to see if it is directed | |
972 | * to us in promiscuous mode. | |
973 | */ | |
25ad00bb | 974 | etdev->Stats.unircv++; |
cfb739b4 GKH |
975 | } |
976 | ||
4fbdf811 | 977 | if (len > 0) { |
cfb739b4 GKH |
978 | struct sk_buff *skb = NULL; |
979 | ||
4fbdf811 AC |
980 | /* rfd->PacketSize = len - 4; */ |
981 | rfd->PacketSize = len; | |
cfb739b4 | 982 | |
4fbdf811 | 983 | skb = dev_alloc_skb(rfd->PacketSize + 2); |
cfb739b4 | 984 | if (!skb) { |
15700039 | 985 | dev_err(&etdev->pdev->dev, |
cfb739b4 | 986 | "Couldn't alloc an SKB for Rx\n"); |
cfb739b4 GKH |
987 | return NULL; |
988 | } | |
989 | ||
4fbdf811 | 990 | etdev->net_stats.rx_bytes += rfd->PacketSize; |
cfb739b4 | 991 | |
4fbdf811 AC |
992 | memcpy(skb_put(skb, rfd->PacketSize), |
993 | rx_local->Fbr[rindex]->Va[bindex], | |
994 | rfd->PacketSize); | |
cfb739b4 | 995 | |
25ad00bb AC |
996 | skb->dev = etdev->netdev; |
997 | skb->protocol = eth_type_trans(skb, etdev->netdev); | |
cfb739b4 GKH |
998 | skb->ip_summed = CHECKSUM_NONE; |
999 | ||
1000 | netif_rx(skb); | |
1001 | } else { | |
4fbdf811 | 1002 | rfd->PacketSize = 0; |
cfb739b4 GKH |
1003 | } |
1004 | ||
4fbdf811 AC |
1005 | nic_return_rfd(etdev, rfd); |
1006 | return rfd; | |
cfb739b4 GKH |
1007 | } |
1008 | ||
1009 | /** | |
1010 | * et131x_reset_recv - Reset the receive list | |
25ad00bb | 1011 | * @etdev: pointer to our adapter |
cfb739b4 GKH |
1012 | * |
1013 | * Assumption, Rcv spinlock has been acquired. | |
1014 | */ | |
25ad00bb | 1015 | void et131x_reset_recv(struct et131x_adapter *etdev) |
cfb739b4 | 1016 | { |
15700039 | 1017 | WARN_ON(list_empty(&etdev->RxRing.RecvList)); |
cfb739b4 | 1018 | |
cfb739b4 GKH |
1019 | } |
1020 | ||
1021 | /** | |
1022 | * et131x_handle_recv_interrupt - Interrupt handler for receive processing | |
25ad00bb | 1023 | * @etdev: pointer to our adapter |
cfb739b4 GKH |
1024 | * |
1025 | * Assumption, Rcv spinlock has been acquired. | |
1026 | */ | |
25ad00bb | 1027 | void et131x_handle_recv_interrupt(struct et131x_adapter *etdev) |
cfb739b4 | 1028 | { |
4fbdf811 | 1029 | PMP_RFD rfd = NULL; |
4fbdf811 | 1030 | u32 count = 0; |
4fbdf811 | 1031 | bool done = true; |
cfb739b4 GKH |
1032 | |
1033 | /* Process up to available RFD's */ | |
4fbdf811 | 1034 | while (count < NUM_PACKETS_HANDLED) { |
25ad00bb | 1035 | if (list_empty(&etdev->RxRing.RecvList)) { |
15700039 | 1036 | WARN_ON(etdev->RxRing.nReadyRecv != 0); |
4fbdf811 | 1037 | done = false; |
cfb739b4 GKH |
1038 | break; |
1039 | } | |
1040 | ||
4fbdf811 | 1041 | rfd = nic_rx_pkts(etdev); |
cfb739b4 | 1042 | |
4fbdf811 | 1043 | if (rfd == NULL) |
cfb739b4 | 1044 | break; |
cfb739b4 GKH |
1045 | |
1046 | /* Do not receive any packets until a filter has been set. | |
cfb739b4 GKH |
1047 | * Do not receive any packets until we have link. |
1048 | * If length is zero, return the RFD in order to advance the | |
1049 | * Free buffer ring. | |
1050 | */ | |
f6b35d66 AC |
1051 | if (!etdev->PacketFilter || |
1052 | !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) || | |
4fbdf811 | 1053 | rfd->PacketSize == 0) { |
cfb739b4 GKH |
1054 | continue; |
1055 | } | |
1056 | ||
1057 | /* Increment the number of packets we received */ | |
25ad00bb | 1058 | etdev->Stats.ipackets++; |
cfb739b4 GKH |
1059 | |
1060 | /* Set the status on the packet, either resources or success */ | |
7f59b1bf | 1061 | if (etdev->RxRing.nReadyRecv < RFD_LOW_WATER_MARK) { |
15700039 AC |
1062 | dev_warn(&etdev->pdev->dev, |
1063 | "RFD's are running out\n"); | |
cfb739b4 | 1064 | } |
4fbdf811 | 1065 | count++; |
cfb739b4 GKH |
1066 | } |
1067 | ||
4fbdf811 | 1068 | if (count == NUM_PACKETS_HANDLED || !done) { |
25ad00bb | 1069 | etdev->RxRing.UnfinishedReceives = true; |
c2557177 | 1070 | writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, |
f3f415a3 | 1071 | &etdev->regs->global.watchdog_timer); |
4fbdf811 | 1072 | } else |
cfb739b4 | 1073 | /* Watchdog timer will disable itself if appropriate. */ |
25ad00bb | 1074 | etdev->RxRing.UnfinishedReceives = false; |
cfb739b4 GKH |
1075 | } |
1076 | ||
356c74b4 AC |
1077 | static inline u32 bump_fbr(u32 *fbr, u32 limit) |
1078 | { | |
b9d2dde0 AC |
1079 | u32 v = *fbr; |
1080 | v++; | |
1081 | /* This works for all cases where limit < 1024. The 1023 case | |
1082 | works because 1023++ is 1024 which means the if condition is not | |
1083 | taken but the carry of the bit into the wrap bit toggles the wrap | |
1084 | value correctly */ | |
1085 | if ((v & ET_DMA10_MASK) > limit) { | |
1086 | v &= ~ET_DMA10_MASK; | |
1087 | v ^= ET_DMA10_WRAP; | |
1088 | } | |
1089 | /* For the 1023 case */ | |
1090 | v &= (ET_DMA10_MASK|ET_DMA10_WRAP); | |
1091 | *fbr = v; | |
1092 | return v; | |
356c74b4 AC |
1093 | } |
1094 | ||
/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @etdev: pointer to our adapter
 * @rfd: pointer to the RFD
 *
 * Hands the RFD's buffer back to the hardware by writing its physical
 * address into the oldest entry of the matching free buffer ring, then
 * re-queues the RFD on the tail of the software receive list.
 */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
{
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	uint16_t bi = rfd->bufferindex;
	uint8_t ri = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		/* FbrLock serializes free-buffer-ring updates against other
		 * contexts that return buffers.
		 */
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			PFBR_DESC_t next =
			    (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
					 INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[1]->PALow[bi];
			next->word2.value = bi;

			/* Advance the local full index (with wrap) and tell
			 * the hardware about the newly available buffer.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
					rx_local->Fbr1NumEntries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			PFBR_DESC_t next =
			    (PFBR_DESC_t) rx_local->pFbr0RingVa +
					 INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[0]->PALow[bi];
			next->word2.value = bi;

			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		/* Out-of-range index: the buffer cannot be handed back to
		 * the hardware, but the RFD itself is still re-listed below.
		 */
		dev_err(&etdev->pdev->dev,
			"NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	/* More ready RFDs than were ever allocated indicates corruption */
	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}