/*
    drivers/net/ethernet/dec/tulip/interrupt.c

    Copyright 2000,2001 The Linux Kernel Team
    Written/copyright 1994-2001 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU General Public License, incorporated herein by reference.

    Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

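/*
 * Tuning knobs shared with the rest of the driver: tulip_rx_copybreak is
 * the size below which a received packet is copied into a freshly
 * allocated skb (instead of flipping the ring buffer), and
 * tulip_max_interrupt_work bounds how many events one run of the
 * interrupt handler will process.  They are presumably initialised from
 * the corresponding module parameters elsewhere in the driver.
 */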
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
    /* CSR11 21143 hardware Mitigation Control Interrupt
       We use only RX mitigation; other techniques are used for
       TX intr. mitigation.

       31    Cycle Size (timer control)
       30:27 TX timer in 16 * Cycle size
       26:24 TX No pkts before Int.
       23:20 RX timer in Cycle size
       19:17 RX No pkts before Int.
       16    Continuous Mode (CM)
    */

    0x0,            /* IM disabled */
    0x80150000,     /* RX time = 1, RX pkts = 2, CM = 1 */
    0x80150000,
    0x80270000,
    0x80370000,
    0x80490000,
    0x80590000,
    0x80690000,
    0x807B0000,
    0x808B0000,
    0x809D0000,
    0x80AD0000,
    0x80BD0000,
    0x80CF0000,
    0x80DF0000,
//  0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
    0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
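
/*
 * Worked example, using the bit layout documented above: the last entry,
 * 0x80F10000, sets bit 31 (cycle size), programs the RX timer field
 * (bits 23:20) to its maximum, leaves the TX fields and the RX
 * packet-count field (bits 19:17) at zero, and sets bit 16 (Continuous
 * Mode), matching the "RX time = 16, RX pkts = 0, CM = 1" annotation.
 */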
#endif


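/*
 * Replenish empty Rx ring slots with freshly allocated, DMA-mapped skbs,
 * hand the corresponding descriptors back to the chip by setting DescOwned,
 * and return the number of slots that were refilled.
 */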
int tulip_refill_rx(struct net_device *dev)
{
    struct tulip_private *tp = netdev_priv(dev);
    int entry;
    int refilled = 0;

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
            struct sk_buff *skb;
            dma_addr_t mapping;

            skb = tp->rx_buffers[entry].skb =
                netdev_alloc_skb(dev, PKT_BUF_SZ);
            if (skb == NULL)
                break;

            mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                     PCI_DMA_FROMDEVICE);
            if (dma_mapping_error(&tp->pdev->dev, mapping)) {
                dev_kfree_skb(skb);
                tp->rx_buffers[entry].skb = NULL;
                break;
            }

            tp->rx_buffers[entry].mapping = mapping;

            tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
            refilled++;
        }
        tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
    }
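    /*
     * On the Lite-On PNIC (LC82C168) the receiver parks itself when it
     * runs out of descriptors: CSR5 bits 19:17 hold the Rx process state,
     * and state 4 means "suspended - no receive buffer available".
     * Writing CSR2 (the receive poll demand register on 21x4x-compatible
     * chips) kicks the receiver again now that buffers have been refilled.
     */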
    if (tp->chip_id == LC82C168) {
        if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
            /* Rx stopped due to out of buffers,
             * restart it
             */
            iowrite32(0x01, tp->base_addr + CSR2);
        }
    }
    return refilled;
}

#ifdef CONFIG_TULIP_NAPI

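/*
 * Timer callback armed by tulip_poll() when it could not refill the Rx
 * ring because skb allocation failed: simply reschedule NAPI polling so
 * the refill can be retried.
 */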
void oom_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct tulip_private *tp = netdev_priv(dev);

    napi_schedule(&tp->napi);
}

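/*
 * NAPI poll routine: drain completed Rx descriptors, handing at most
 * @budget packets to the stack, and return the number processed.  Only
 * when that is below the budget (and the ring could be refilled) does it
 * call napi_complete_done() and re-enable the Rx interrupts via CSR7.
 */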
int tulip_poll(struct napi_struct *napi, int budget)
{
    struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
    struct net_device *dev = tp->dev;
    int entry = tp->cur_rx % RX_RING_SIZE;
    int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
    int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

    /* One Rx buffer is needed for mitigation activation (or this might be
       a bug in the ring buffer code; check later). -- JHS */

    if (budget >= RX_RING_SIZE)
        budget--;
#endif

    if (tulip_debug > 4)
        netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
                   entry, tp->rx_ring[entry].status);

    do {
        if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
            netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
            break;
        }
        /* Acknowledge current RX interrupt sources. */
        iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
            s32 status = le32_to_cpu(tp->rx_ring[entry].status);
            short pkt_len;

            if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                break;

            if (tulip_debug > 5)
                netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
                           entry, status);

            if (++work_done >= budget)
                goto not_done;

            /*
             * Omit the four octet CRC from the length.
             * (May not be considered valid until we have
             * checked status for RxLengthOver2047 bits)
             */
            pkt_len = ((status >> 16) & 0x7ff) - 4;
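            /*
             * Example: a maximum-size untagged frame is 1518 bytes on the
             * wire including the FCS, so the descriptor reports 1518 and
             * pkt_len becomes 1514; with a VLAN tag pkt_len can reach 1518,
             * which is why the sanity check below uses that limit.
             */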

            /*
             * Maximum pkt_len is 1518 (1514 + vlan header)
             * Anything higher than this is always invalid
             * regardless of RxLengthOver2047 bits
             */

            if ((status & (RxLengthOver2047 |
                           RxDescCRCError |
                           RxDescCollisionSeen |
                           RxDescRunt |
                           RxDescDescErr |
                           RxWholePkt)) != RxWholePkt ||
                pkt_len > 1518) {
                if ((status & (RxLengthOver2047 |
                               RxWholePkt)) != RxWholePkt) {
                    /* Ignore earlier buffers. */
                    if ((status & 0xffff) != 0x7fff) {
                        if (tulip_debug > 1)
                            dev_warn(&dev->dev,
                                     "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                     status);
                        dev->stats.rx_length_errors++;
                    }
                } else {
                    /* There was a fatal error. */
                    if (tulip_debug > 2)
                        netdev_dbg(dev, "Receive error, Rx status %08x\n",
                                   status);
                    dev->stats.rx_errors++; /* end of a packet. */
                    if (pkt_len > 1518 ||
                        (status & RxDescRunt))
                        dev->stats.rx_length_errors++;

                    if (status & 0x0004)
                        dev->stats.rx_frame_errors++;
                    if (status & 0x0002)
                        dev->stats.rx_crc_errors++;
                    if (status & 0x0001)
                        dev->stats.rx_fifo_errors++;
                }
            } else {
                struct sk_buff *skb;

                /* Check if the packet is long enough to accept without copying
                   to a minimally-sized skbuff. */
                if (pkt_len < tulip_rx_copybreak &&
                    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                    skb_reserve(skb, 2);    /* 16 byte align the IP header */
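                    /*
                     * The ring buffer stays mapped for the device: sync it
                     * for CPU access before copying out of it, then hand
                     * ownership back afterwards so the same buffer can
                     * receive the next frame.
                     */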
                    pci_dma_sync_single_for_cpu(tp->pdev,
                                                tp->rx_buffers[entry].mapping,
                                                pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                    skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                            pkt_len);
                    skb_put(skb, pkt_len);
#else
                    skb_put_data(skb,
                                 tp->rx_buffers[entry].skb->data,
                                 pkt_len);
#endif
                    pci_dma_sync_single_for_device(tp->pdev,
                                                   tp->rx_buffers[entry].mapping,
                                                   pkt_len, PCI_DMA_FROMDEVICE);
                } else { /* Pass up the skb already on the Rx ring. */
                    char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                         pkt_len);

#ifndef final_version
                    if (tp->rx_buffers[entry].mapping !=
                        le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                        dev_err(&dev->dev,
                                "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
                                le32_to_cpu(tp->rx_ring[entry].buffer1),
                                (unsigned long long)tp->rx_buffers[entry].mapping,
                                skb->head, temp);
                    }
#endif

                    pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                    tp->rx_buffers[entry].skb = NULL;
                    tp->rx_buffers[entry].mapping = 0;
                }
                skb->protocol = eth_type_trans(skb, dev);

                netif_receive_skb(skb);

                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
            }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
            received++;
#endif

            entry = (++tp->cur_rx) % RX_RING_SIZE;
            if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                tulip_refill_rx(dev);

        }

        /* New ack strategy... the irq handler no longer acks Rx;
           hopefully this helps. */

        /* Really bad things can happen here... If a new packet arrives
         * and an irq arrives (tx, or just due to an occasionally unset
         * mask), it will be acked by the irq handler, but a new poll
         * is not scheduled.  It is a major hole in the design.
         * No idea how to fix this if "playing with fire" fails
         * tomorrow (night 011029).  If it does not fail, we have finally
         * won: the amount of IO did not increase at all. */
    } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

    /* We use this simplistic scheme for IM.  It has been proven by
       real-life installations.  We could have IM enabled
       continuously, but this would cause unnecessary latency.
       Unfortunately we can't use all the NET_RX_* feedback here.
       That would turn on IM for devices that are not contributing
       to backlog congestion, adding unnecessary latency.

       We monitor the device RX-ring and have:

       HW Interrupt Mitigation either ON or OFF.

       ON:  More than 1 pkt received (per intr.) OR we are dropping
       OFF: Only 1 pkt received

       Note: we only use the min and max (0, 15) settings from mit_table. */


    if (tp->flags & HAS_INTR_MITIGATION) {
        if (received > 1) {
            if (!tp->mit_on) {
                tp->mit_on = 1;
                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
            }
        } else {
            if (tp->mit_on) {
                tp->mit_on = 0;
                iowrite32(0, tp->base_addr + CSR11);
            }
        }
    }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

    tulip_refill_rx(dev);

    /* If the RX ring could not be completely refilled, we are out of memory. */
    if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        goto oom;

    /* Remove us from the polling list and enable RX intr. */

    napi_complete_done(napi, work_done);
    iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

    /* The last op happens after poll completion. Which means the following:
     * 1. it can race with disabling irqs in the irq handler
     * 2. it can race with disabling/enabling irqs in other poll threads
     * 3. if an irq is raised after the loop begins, it will be immediately
     *    triggered here.
     *
     * Summarizing: the logic results in some redundant irqs, both
     * due to races in masking and due to too-late acking of already
     * processed irqs.  But it must not result in losing events.
     */

    return work_done;

not_done:
    if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
        tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        tulip_refill_rx(dev);

    if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        goto oom;

    return work_done;

oom:    /* Executed with RX ints disabled */

    /* Start the timer and stop polling, but do not enable rx interrupts. */
    mod_timer(&tp->oom_timer, jiffies+1);

    /* Think: using timer_pending() here would be an explicit signature of
     * a bug.  The timer could be pending now, but fire and complete
     * before we did napi_complete().  See?  We would lose it. */

    /* remove ourselves from the polling list */
    napi_complete_done(napi, work_done);

    return work_done;
}

#else /* CONFIG_TULIP_NAPI */

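/*
 * Non-NAPI receive path: runs from the interrupt handler, drains completed
 * Rx descriptors (bounded by the current ring occupancy via rx_work_limit)
 * and hands packets to the stack with netif_rx().  Returns the number of
 * packets received.
 */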
static int tulip_rx(struct net_device *dev)
{
    struct tulip_private *tp = netdev_priv(dev);
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int received = 0;

    if (tulip_debug > 4)
        netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
                   entry, tp->rx_ring[entry].status);
    /* If we own the next entry, it is a new packet. Send it up. */
    while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
        short pkt_len;

        if (tulip_debug > 5)
            netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
                       entry, status);
        if (--rx_work_limit < 0)
            break;

        /*
         * Omit the four octet CRC from the length.
         * (May not be considered valid until we have
         * checked status for RxLengthOver2047 bits)
         */
        pkt_len = ((status >> 16) & 0x7ff) - 4;
        /*
         * Maximum pkt_len is 1518 (1514 + vlan header)
         * Anything higher than this is always invalid
         * regardless of RxLengthOver2047 bits
         */

        if ((status & (RxLengthOver2047 |
                       RxDescCRCError |
                       RxDescCollisionSeen |
                       RxDescRunt |
                       RxDescDescErr |
                       RxWholePkt)) != RxWholePkt ||
            pkt_len > 1518) {
            if ((status & (RxLengthOver2047 |
                           RxWholePkt)) != RxWholePkt) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (tulip_debug > 1)
                        netdev_warn(dev,
                                    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
                                    status);
                    dev->stats.rx_length_errors++;
                }
            } else {
                /* There was a fatal error. */
                if (tulip_debug > 2)
                    netdev_dbg(dev, "Receive error, Rx status %08x\n",
                               status);
                dev->stats.rx_errors++; /* end of a packet. */
                if (pkt_len > 1518 ||
                    (status & RxDescRunt))
                    dev->stats.rx_length_errors++;
                if (status & 0x0004)
                    dev->stats.rx_frame_errors++;
                if (status & 0x0002)
                    dev->stats.rx_crc_errors++;
                if (status & 0x0001)
                    dev->stats.rx_fifo_errors++;
            }
        } else {
            struct sk_buff *skb;

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < tulip_rx_copybreak &&
                (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                pci_dma_sync_single_for_cpu(tp->pdev,
                                            tp->rx_buffers[entry].mapping,
                                            pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                        pkt_len);
                skb_put(skb, pkt_len);
#else
                skb_put_data(skb,
                             tp->rx_buffers[entry].skb->data,
                             pkt_len);
#endif
                pci_dma_sync_single_for_device(tp->pdev,
                                               tp->rx_buffers[entry].mapping,
                                               pkt_len, PCI_DMA_FROMDEVICE);
            } else { /* Pass up the skb already on the Rx ring. */
                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                     pkt_len);

#ifndef final_version
                if (tp->rx_buffers[entry].mapping !=
                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                    dev_err(&dev->dev,
                            "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
                            le32_to_cpu(tp->rx_ring[entry].buffer1),
                            (long long)tp->rx_buffers[entry].mapping,
                            skb->head, temp);
                }
#endif

                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
            }
            skb->protocol = eth_type_trans(skb, dev);

            netif_rx(skb);

            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pkt_len;
        }
        received++;
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }
    return received;
}
#endif /* CONFIG_TULIP_NAPI */

static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
    struct tulip_private *tp = netdev_priv(dev);
    int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

    if (csr12 != tp->csr12_shadow) {
        /* ack interrupt */
        iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
        tp->csr12_shadow = csr12;
        /* do link change stuff */
        spin_lock(&tp->lock);
        tulip_check_duplex(dev);
        spin_unlock(&tp->lock);
        /* clear irq ack bit */
        iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

        return 1;
    }
#endif

    return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct tulip_private *tp = netdev_priv(dev);
    void __iomem *ioaddr = tp->base_addr;
    int csr5;
    int missed;
    int rx = 0;
    int tx = 0;
    int oi = 0;
    int maxrx = RX_RING_SIZE;
    int maxtx = TX_RING_SIZE;
    int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
    int rxd = 0;
#else
    int entry;
#endif
    unsigned int work_count = tulip_max_interrupt_work;
    unsigned int handled = 0;

    /* Let's see whether the interrupt really is for us */
    csr5 = ioread32(ioaddr + CSR5);

    if (tp->flags & HAS_PHY_IRQ)
        handled = phy_interrupt(dev);

    if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
        return IRQ_RETVAL(handled);

    tp->nir++;

    do {

#ifdef CONFIG_TULIP_NAPI

        if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
            rxd++;
            /* Mask RX intrs and add the device to the poll list. */
            iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
            napi_schedule(&tp->napi);

            if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                break;
        }

        /* Acknowledge the interrupt sources we handle here ASAP;
           the poll function does Rx and RxNoBuf acking. */
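        /* (0x0001ff3f is 0x0001ffff with the RxIntr (0x40) and RxNoBuf
           (0x80) bits cleared, so those two status bits stay set until the
           poll routine writes them back to CSR5.) */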

        iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


        if (csr5 & (RxIntr | RxNoBuf)) {
            rx += tulip_rx(dev);
            tulip_refill_rx(dev);
        }

#endif /* CONFIG_TULIP_NAPI */

        if (tulip_debug > 4)
            netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
                       csr5, ioread32(ioaddr + CSR5));


        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
            unsigned int dirty_tx;

            spin_lock(&tp->lock);

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(tp->tx_ring[entry].status);

                if (status < 0)
                    break;      /* It still has not been Txed */

                /* Check for Rx filter setup frames. */
                if (tp->tx_buffers[entry].skb == NULL) {
                    /* test because dummy frames are not mapped */
                    if (tp->tx_buffers[entry].mapping)
                        pci_unmap_single(tp->pdev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         PCI_DMA_TODEVICE);
                    continue;
                }

                if (status & 0x8000) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (tulip_debug > 1)
                        netdev_dbg(dev, "Transmit error, Tx status %08x\n",
                                   status);
#endif
                    dev->stats.tx_errors++;
                    if (status & 0x4104)
                        dev->stats.tx_aborted_errors++;
                    if (status & 0x0C00)
                        dev->stats.tx_carrier_errors++;
                    if (status & 0x0200)
                        dev->stats.tx_window_errors++;
                    if (status & 0x0002)
                        dev->stats.tx_fifo_errors++;
                    if ((status & 0x0080) && tp->full_duplex == 0)
                        dev->stats.tx_heartbeat_errors++;
                } else {
                    dev->stats.tx_bytes +=
                        tp->tx_buffers[entry].skb->len;
                    dev->stats.collisions += (status >> 3) & 15;
                    dev->stats.tx_packets++;
                }

                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                 tp->tx_buffers[entry].skb->len,
                                 PCI_DMA_TODEVICE);

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                tp->tx_buffers[entry].skb = NULL;
                tp->tx_buffers[entry].mapping = 0;
                tx++;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                dev_err(&dev->dev,
                        "Out-of-sync dirty pointer, %d vs. %d\n",
                        dirty_tx, tp->cur_tx);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (tulip_debug > 2)
                    dev_warn(&dev->dev,
                             "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
                             csr5, ioread32(ioaddr + CSR6),
                             tp->csr6);
                tulip_restart_rxtx(tp);
            }
            spin_unlock(&tp->lock);
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 == 0xffffffff)
                break;
            if (csr5 & TxJabber)
                dev->stats.tx_errors++;
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & 0xC000) != 0xC000)
                    tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                else
                    tp->csr6 |= 0x00200000; /* Store-n-forward. */
                /* Restart the transmit process. */
                tulip_restart_rxtx(tp);
                iowrite32(0, ioaddr + CSR1);
            }
            if (csr5 & (RxDied | RxNoBuf)) {
                if (tp->flags & COMET_MAC_ADDR) {
                    iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                    iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                }
            }
            if (csr5 & RxDied) {    /* Missed a Rx frame. */
                dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                dev->stats.rx_errors++;
                tulip_start_rxtx(tp);
            }
            /*
             * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
             * call is ever done under the spinlock
             */
            if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                if (tp->link_change)
                    (tp->link_change)(dev, csr5);
            }
            if (csr5 & SystemError) {
                int error = (csr5 >> 23) & 7;
                /* oops, we hit a PCI error.  The code produced corresponds
                 * to the reason:
                 *  0 - parity error
                 *  1 - master abort
                 *  2 - target abort
                 * Note that on parity error, we should do a software reset
                 * of the chip to get it back into a sane state (according
                 * to the 21142/3 docs that is).
                 *   -- rmk
                 */
                dev_err(&dev->dev,
                        "(%lu) System Error occurred (%d)\n",
                        tp->nir, error);
            }
            /* Clear all error sources, including undocumented ones! */
            iowrite32(0x0800f7ba, ioaddr + CSR5);
            oi++;
        }
        if (csr5 & TimerInt) {

            if (tulip_debug > 2)
                dev_err(&dev->dev,
                        "Re-enabling interrupts, %08x\n",
                        csr5);
            iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
            tp->ttimer = 0;
            oi++;
        }
        if (tx > maxtx || rx > maxrx || oi > maxoi) {
            if (tulip_debug > 1)
                dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
                         csr5, tp->nir, tx, rx, oi);

            /* Acknowledge all interrupt sources. */
            iowrite32(0x8001ffff, ioaddr + CSR5);
            if (tp->flags & HAS_INTR_MITIGATION) {
                /* Josip Loncaric at ICASE did extensive experimentation
                   to develop a good interrupt mitigation setting. */
                iowrite32(0x8b240000, ioaddr + CSR11);
            } else if (tp->chip_id == LC82C168) {
                /* the LC82C168 doesn't have a hw timer. */
                iowrite32(0x00, ioaddr + CSR7);
                mod_timer(&tp->timer, RUN_AT(HZ/50));
            } else {
                /* Mask all interrupting sources, set timer to
                   re-enable. */
                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                iowrite32(0x0012, ioaddr + CSR11);
            }
            break;
        }

        work_count--;
        if (work_count == 0)
            break;

        csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
        if (rxd)
            csr5 &= ~RxPollInt;
    } while ((csr5 & (TxNoBuf |
                      TxDied |
                      TxIntr |
                      TimerInt |
                      /* Abnormal intr. */
                      RxDied |
                      TxFIFOUnderflow |
                      TxJabber |
                      TPLnkFail |
                      SystemError)) != 0);
#else
    } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

    tulip_refill_rx(dev);

    /* check if the card is in suspend mode */
    entry = tp->dirty_rx % RX_RING_SIZE;
    if (tp->rx_buffers[entry].skb == NULL) {
        if (tulip_debug > 1)
            dev_warn(&dev->dev,
                     "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
                     tp->nir, tp->cur_rx, tp->ttimer, rx);
        if (tp->chip_id == LC82C168) {
            iowrite32(0x00, ioaddr + CSR7);
            mod_timer(&tp->timer, RUN_AT(HZ/50));
        } else {
            if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                if (tulip_debug > 1)
                    dev_warn(&dev->dev,
                             "in rx suspend mode: (%lu) set timer\n",
                             tp->nir);
                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                          ioaddr + CSR7);
                iowrite32(TimerInt, ioaddr + CSR5);
                iowrite32(12, ioaddr + CSR11);
                tp->ttimer = 1;
            }
        }
    }
#endif /* CONFIG_TULIP_NAPI */

    if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
        dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
    }

    if (tulip_debug > 4)
        netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
                   ioread32(ioaddr + CSR5));

    return IRQ_HANDLED;
}