/*
    drivers/net/tulip/interrupt.c

    Copyright 2000,2001  The Linux Kernel Team
    Written/copyright 1994-2001 by Donald Becker.

    This software may be used and distributed according to the terms
    of the GNU General Public License, incorporated herein by reference.

    Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
    for more information on this driver.
    Please submit bugs to http://bugzilla.kernel.org/ .

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>

int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
    /* CSR11 21143 hardware Mitigation Control Interrupt
       We use only RX mitigation; other techniques are used for
       TX intr. mitigation.

       31    Cycle Size (timer control)
       30:27 TX timer in 16 * Cycle size
       26:24 TX No pkts before Int.
       23:20 RX timer in Cycle size
       19:17 RX No pkts before Int.
       16    Continuous Mode (CM)
    */

    0x0,         /* IM disabled */
    0x80150000,  /* RX time = 1, RX pkts = 2, CM = 1 */
    0x80150000,
    0x80270000,
    0x80370000,
    0x80490000,
    0x80590000,
    0x80690000,
    0x807B0000,
    0x808B0000,
    0x809D0000,
    0x80AD0000,
    0x80BD0000,
    0x80CF0000,
    0x80DF0000,
//  0x80FF0000   /* RX time = 16, RX pkts = 7, CM = 1 */
    0x80F10000   /* RX time = 16, RX pkts = 0, CM = 1 */
};
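
/* A worked decode of the second entry, using the bit layout above
 * (our reading, not verified against the 21143 datasheet):
 * 0x80150000 = bit 31 set (Cycle Size), bits 23:20 = 0001 (RX timer = 1),
 * bits 19:17 = 010 (RX pkts = 2), bit 16 set (CM = 1) -- matching the
 * "RX time = 1, RX pkts = 2, CM = 1" annotation. */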
#endif


int tulip_refill_rx(struct net_device *dev)
{
    struct tulip_private *tp = netdev_priv(dev);
    int entry;
    int refilled = 0;

    /* Refill the Rx ring buffers. */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
            struct sk_buff *skb;
            dma_addr_t mapping;

            skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
            if (skb == NULL)
                break;

            mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                     PCI_DMA_FROMDEVICE);
            tp->rx_buffers[entry].mapping = mapping;

            skb->dev = dev; /* Mark as being used by this device. */
            tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
            refilled++;
        }
        tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
    }
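    /* The LC82C168 (PNIC) can leave its receive engine suspended once it
     * runs out of buffers. CSR2 is the receive poll demand register on
     * this family, so (to our understanding) writing it kicks RX again. */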
    if (tp->chip_id == LC82C168) {
        if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
            /* Rx stopped due to running out of buffers;
             * restart it.
             */
            iowrite32(0x01, tp->base_addr + CSR2);
        }
    }
    return refilled;
}

#ifdef CONFIG_TULIP_NAPI

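/* Timer callback armed from the oom path in tulip_poll(): with RX
 * interrupts still disabled, it simply reschedules NAPI so the ring
 * refill can be retried once memory may be available again. */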
void oom_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct tulip_private *tp = netdev_priv(dev);
    napi_schedule(&tp->napi);
}

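/* NAPI poll handler: receive up to @budget packets. Per the NAPI
 * contract, returning less than @budget means we called napi_complete()
 * and re-enabled RX interrupts; returning @budget keeps us polling. */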
bea3348e 109int tulip_poll(struct napi_struct *napi, int budget)
1da177e4 110{
bea3348e
SH
111 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
112 struct net_device *dev = tp->dev;
1da177e4 113 int entry = tp->cur_rx % RX_RING_SIZE;
bea3348e
SH
114 int work_done = 0;
115#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
1da177e4 116 int received = 0;
bea3348e 117#endif
1da177e4 118
1da177e4
LT
119#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
120
121/* that one buffer is needed for mit activation; or might be a
122 bug in the ring buffer code; check later -- JHS*/
123
bea3348e 124 if (budget >=RX_RING_SIZE) budget--;
1da177e4
LT
125#endif
126
127 if (tulip_debug > 4)
128 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
129 tp->rx_ring[entry].status);
130
    do {
        if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
            printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
            break;
        }
        /* Acknowledge current RX interrupt sources. */
        iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


        /* If we own the next entry, it is a new packet. Send it up. */
        while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
            s32 status = le32_to_cpu(tp->rx_ring[entry].status);
            short pkt_len;

            if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                break;

            if (tulip_debug > 5)
                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                       dev->name, entry, status);

            if (++work_done >= budget)
                goto not_done;

            /*
             * Omit the four octet CRC from the length.
             * (May not be considered valid until we have
             * checked status for RxLengthOver2047 bits)
             */
            pkt_len = ((status >> 16) & 0x7ff) - 4;

            /*
             * Maximum pkt_len is 1518 (1514 + vlan header)
             * Anything higher than this is always invalid
             * regardless of RxLengthOver2047 bits
             */

            if ((status & (RxLengthOver2047 |
                           RxDescCRCError |
                           RxDescCollisionSeen |
                           RxDescRunt |
                           RxDescDescErr |
                           RxWholePkt)) != RxWholePkt
                || pkt_len > 1518) {
                if ((status & (RxLengthOver2047 |
                               RxWholePkt)) != RxWholePkt) {
                    /* Ignore earlier buffers. */
                    if ((status & 0xffff) != 0x7fff) {
                        if (tulip_debug > 1)
                            printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                   "spanned multiple buffers, status %8.8x!\n",
                                   dev->name, status);
                        tp->stats.rx_length_errors++;
                    }
                } else {
                    /* There was a fatal error. */
                    if (tulip_debug > 2)
                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                               dev->name, status);
                    tp->stats.rx_errors++; /* end of a packet. */
                    if (pkt_len > 1518 ||
                        (status & RxDescRunt))
                        tp->stats.rx_length_errors++;

                    if (status & 0x0004) tp->stats.rx_frame_errors++;
                    if (status & 0x0002) tp->stats.rx_crc_errors++;
                    if (status & 0x0001) tp->stats.rx_fifo_errors++;
                }
            } else {
                struct sk_buff *skb;

                /* Check if the packet is long enough to accept without copying
                   to a minimally-sized skbuff. */
                if (pkt_len < tulip_rx_copybreak
                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                    skb_reserve(skb, 2); /* 16 byte align the IP header */
                    pci_dma_sync_single_for_cpu(tp->pdev,
                                                tp->rx_buffers[entry].mapping,
                                                pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                    skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                            pkt_len);
                    skb_put(skb, pkt_len);
#else
                    memcpy(skb_put(skb, pkt_len),
                           tp->rx_buffers[entry].skb->data,
                           pkt_len);
#endif
                    pci_dma_sync_single_for_device(tp->pdev,
                                                   tp->rx_buffers[entry].mapping,
                                                   pkt_len, PCI_DMA_FROMDEVICE);
                } else { /* Pass up the skb already on the Rx ring. */
                    char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                         pkt_len);

#ifndef final_version
                    if (tp->rx_buffers[entry].mapping !=
                        le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                               "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                               dev->name,
                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                               (unsigned long long)tp->rx_buffers[entry].mapping,
                               skb->head, temp);
                    }
#endif

                    pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                    tp->rx_buffers[entry].skb = NULL;
                    tp->rx_buffers[entry].mapping = 0;
                }
                skb->protocol = eth_type_trans(skb, dev);

                netif_receive_skb(skb);

                tp->stats.rx_packets++;
                tp->stats.rx_bytes += pkt_len;
            }
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
            received++;
#endif

            entry = (++tp->cur_rx) % RX_RING_SIZE;
            if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                tulip_refill_rx(dev);

        }

        /* New ack strategy... the irq handler no longer acks Rx;
           hopefully this helps. */

        /* Really bad things can happen here... If a new packet arrives
         * and an irq arrives (tx or just due to an occasionally unset
         * mask), it will be acked by the irq handler, but the new thread
         * is not scheduled. It is a major hole in the design.
         * No idea how to fix this if "playing with fire" fails
         * tomorrow (night 011029). If it does not fail, we have won:
         * the amount of IO did not increase at all. */
    } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

    /* We use this simplistic scheme for IM. It's proven by
       real life installations. We can have IM enabled
       continuously, but this would cause unnecessary latency.
       Unfortunately we can't use all the NET_RX_* feedback here.
       That would turn on IM for devices that are not contributing
       to backlog congestion, with unnecessary latency.

       We monitor the device RX-ring and have:

       HW Interrupt Mitigation either ON or OFF.

       ON:  More than 1 pkt received (per intr.) OR we are dropping
       OFF: Only 1 pkt received

       Note. We only use min and max (0, 15) settings from mit_table */


    if (tp->flags & HAS_INTR_MITIGATION) {
        if (received > 1) {
            if (!tp->mit_on) {
                tp->mit_on = 1;
                iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
            }
        } else {
            if (tp->mit_on) {
                tp->mit_on = 0;
                iowrite32(0, tp->base_addr + CSR11);
            }
        }
    }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

    tulip_refill_rx(dev);

    /* If the RX ring is not full, we are out of memory. */
    if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        goto oom;

    /* Remove us from the polling list and enable RX intr. */

    napi_complete(napi);
    iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr + CSR7);

    /* The last op happens after poll completion. Which means the following:
     * 1. it can race with disabling irqs in the irq handler
     * 2. it can race with disabling/enabling irqs in other poll threads
     * 3. if an irq was raised after the loop began, it will be immediately
     *    triggered here.
     *
     * Summarizing: the logic results in some redundant irqs both
     * due to races in masking and due to too-late acking of already
     * processed irqs. But it must not result in losing events.
     */

    return work_done;

not_done:
    if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
        tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        tulip_refill_rx(dev);

    if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
        goto oom;

    return work_done;

oom:    /* Executed with RX ints disabled */

    /* Start timer, stop polling, but do not enable rx interrupts. */
    mod_timer(&tp->oom_timer, jiffies+1);

    /* Think: timer_pending() would be an explicit signature of a bug.
     * The timer can be pending now, but have fired and completed
     * before we did napi_complete(). See? We would lose it. */

    /* remove ourselves from the polling list */
    napi_complete(napi);

    return work_done;
}

#else /* CONFIG_TULIP_NAPI */

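/* Non-NAPI receive path: called from the interrupt handler, drains the RX
 * ring up to the remaining ring budget and returns the number of packets
 * passed up via netif_rx(). */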
static int tulip_rx(struct net_device *dev)
{
    struct tulip_private *tp = netdev_priv(dev);
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int received = 0;

    if (tulip_debug > 4)
        printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
               tp->rx_ring[entry].status);
    /* If we own the next entry, it is a new packet. Send it up. */
    while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
        short pkt_len;

        if (tulip_debug > 5)
            printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                   dev->name, entry, status);
        if (--rx_work_limit < 0)
            break;

        /*
           Omit the four octet CRC from the length.
           (May not be considered valid until we have
           checked status for RxLengthOver2047 bits)
        */
        pkt_len = ((status >> 16) & 0x7ff) - 4;
        /*
           Maximum pkt_len is 1518 (1514 + vlan header)
           Anything higher than this is always invalid
           regardless of RxLengthOver2047 bits
        */

        if ((status & (RxLengthOver2047 |
                       RxDescCRCError |
                       RxDescCollisionSeen |
                       RxDescRunt |
                       RxDescDescErr |
                       RxWholePkt)) != RxWholePkt
            || pkt_len > 1518) {
            if ((status & (RxLengthOver2047 |
                           RxWholePkt)) != RxWholePkt) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                               "spanned multiple buffers, status %8.8x!\n",
                               dev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else {
                /* There was a fatal error. */
                if (tulip_debug > 2)
                    printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, status);
                tp->stats.rx_errors++; /* end of a packet. */
                if (pkt_len > 1518 ||
                    (status & RxDescRunt))
                    tp->stats.rx_length_errors++;
                if (status & 0x0004) tp->stats.rx_frame_errors++;
                if (status & 0x0002) tp->stats.rx_crc_errors++;
                if (status & 0x0001) tp->stats.rx_fifo_errors++;
            }
        } else {
            struct sk_buff *skb;

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < tulip_rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2); /* 16 byte align the IP header */
                pci_dma_sync_single_for_cpu(tp->pdev,
                                            tp->rx_buffers[entry].mapping,
                                            pkt_len, PCI_DMA_FROMDEVICE);
#if !defined(__alpha__)
                skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
                                        pkt_len);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       tp->rx_buffers[entry].skb->data,
                       pkt_len);
#endif
                pci_dma_sync_single_for_device(tp->pdev,
                                               tp->rx_buffers[entry].mapping,
                                               pkt_len, PCI_DMA_FROMDEVICE);
            } else { /* Pass up the skb already on the Rx ring. */
                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                     pkt_len);

#ifndef final_version
                if (tp->rx_buffers[entry].mapping !=
                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                    printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                           "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                           dev->name,
                           le32_to_cpu(tp->rx_ring[entry].buffer1),
                           (unsigned long long)tp->rx_buffers[entry].mapping,
                           skb->head, temp);
                }
#endif

                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
            }
            skb->protocol = eth_type_trans(skb, dev);

            netif_rx(skb);

            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        received++;
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }
    return received;
}
#endif /* CONFIG_TULIP_NAPI */

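/* Check for a PHY-signalled link change. Only implemented for __hppa__
 * boards, where (as the CSR12 handling below suggests) the link-change
 * event is reported through CSR12; returns 1 if an interrupt was
 * handled, 0 otherwise. */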
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
    struct tulip_private *tp = netdev_priv(dev);
    int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

    if (csr12 != tp->csr12_shadow) {
        /* ack interrupt */
        iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
        tp->csr12_shadow = csr12;
        /* do link change stuff */
        spin_lock(&tp->lock);
        tulip_check_duplex(dev);
        spin_unlock(&tp->lock);
        /* clear irq ack bit */
        iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

        return 1;
    }
#endif

    return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct tulip_private *tp = netdev_priv(dev);
    void __iomem *ioaddr = tp->base_addr;
    int csr5;
    int missed;
    int rx = 0;
    int tx = 0;
    int oi = 0;
    int maxrx = RX_RING_SIZE;
    int maxtx = TX_RING_SIZE;
    int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
    int rxd = 0;
#else
    int entry;
#endif
    unsigned int work_count = tulip_max_interrupt_work;
    unsigned int handled = 0;

    /* Let's see whether the interrupt really is for us */
    csr5 = ioread32(ioaddr + CSR5);

    if (tp->flags & HAS_PHY_IRQ)
        handled = phy_interrupt(dev);

    if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
        return IRQ_RETVAL(handled);

    tp->nir++;

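    /* Loop, re-reading CSR5 and acking as we go, until no interesting
     * interrupt sources remain or the work limit is reached; acking
     * inside the loop avoids losing events that arrive while we run. */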
    do {

#ifdef CONFIG_TULIP_NAPI

        if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
            rxd++;
            /* Mask RX intrs and add the device to the poll list. */
            iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt, ioaddr + CSR7);
            napi_schedule(&tp->napi);

            if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                break;
        }

        /* Acknowledge the interrupt sources we handle here ASAP;
           the poll function does the Rx and RxNoBuf acking. */

        iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);


        if (csr5 & (RxIntr | RxNoBuf)) {
            rx += tulip_rx(dev);
            tulip_refill_rx(dev);
        }

#endif /* CONFIG_TULIP_NAPI */

        if (tulip_debug > 4)
            printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, ioread32(ioaddr + CSR5));

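        /* TX completion: scan the TX ring. A negative descriptor status
         * means bit 31 (DescOwned) is still set, i.e. the chip has not
         * finished with that descriptor yet. */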
        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
            unsigned int dirty_tx;

            spin_lock(&tp->lock);

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(tp->tx_ring[entry].status);

                if (status < 0)
                    break; /* It still has not been Txed */

                /* Check for Rx filter setup frames. */
                if (tp->tx_buffers[entry].skb == NULL) {
                    /* test because dummy frames are not mapped */
                    if (tp->tx_buffers[entry].mapping)
                        pci_unmap_single(tp->pdev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         PCI_DMA_TODEVICE);
                    continue;
                }

                if (status & 0x8000) {
                    /* There was a major error; log it. */
#ifndef final_version
                    if (tulip_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & 0x4104) tp->stats.tx_aborted_errors++;
                    if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                    if (status & 0x0200) tp->stats.tx_window_errors++;
                    if (status & 0x0002) tp->stats.tx_fifo_errors++;
                    if ((status & 0x0080) && tp->full_duplex == 0)
                        tp->stats.tx_heartbeat_errors++;
                } else {
                    tp->stats.tx_bytes +=
                        tp->tx_buffers[entry].skb->len;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                 tp->tx_buffers[entry].skb->len,
                                 PCI_DMA_TODEVICE);

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                tp->tx_buffers[entry].skb = NULL;
                tp->tx_buffers[entry].mapping = 0;
                tx++;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                       dev->name, dirty_tx, tp->cur_tx);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (tulip_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                tulip_restart_rxtx(tp);
            }
            spin_unlock(&tp->lock);
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 == 0xffffffff)
                break;
            if (csr5 & TxJabber) tp->stats.tx_errors++;
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & 0xC000) != 0xC000)
                    tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                else
                    tp->csr6 |= 0x00200000; /* Store-n-forward. */
                /* Restart the transmit process. */
                tulip_restart_rxtx(tp);
                iowrite32(0, ioaddr + CSR1);
            }
            if (csr5 & (RxDied | RxNoBuf)) {
                if (tp->flags & COMET_MAC_ADDR) {
                    iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                    iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                }
            }
            if (csr5 & RxDied) {    /* Missed a Rx frame. */
                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                tp->stats.rx_errors++;
                tulip_start_rxtx(tp);
            }
            /*
             * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
             * if this call is ever done under the spinlock.
             */
            if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                if (tp->link_change)
                    (tp->link_change)(dev, csr5);
            }
            if (csr5 & SystemError) {
                int error = (csr5 >> 23) & 7;
                /* oops, we hit a PCI error. The code produced corresponds
                 * to the reason:
                 *  0 - parity error
                 *  1 - master abort
                 *  2 - target abort
                 * Note that on parity error, we should do a software reset
                 * of the chip to get it back into a sane state (according
                 * to the 21142/3 docs that is).
                 *   -- rmk
                 */
                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                       dev->name, tp->nir, error);
            }
            /* Clear all error sources, including undocumented ones! */
            iowrite32(0x0800f7ba, ioaddr + CSR5);
            oi++;
        }
        if (csr5 & TimerInt) {

            if (tulip_debug > 2)
                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                       dev->name, csr5);
            iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
            tp->ttimer = 0;
            oi++;
        }
        if (tx > maxtx || rx > maxrx || oi > maxoi) {
            if (tulip_debug > 1)
                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

            /* Acknowledge all interrupt sources. */
            iowrite32(0x8001ffff, ioaddr + CSR5);
            if (tp->flags & HAS_INTR_MITIGATION) {
                /* Josip Loncaric at ICASE did extensive experimentation
                   to develop a good interrupt mitigation setting. */
                iowrite32(0x8b240000, ioaddr + CSR11);
            } else if (tp->chip_id == LC82C168) {
                /* the LC82C168 doesn't have a hw timer. */
                iowrite32(0x00, ioaddr + CSR7);
                mod_timer(&tp->timer, RUN_AT(HZ/50));
            } else {
                /* Mask all interrupting sources, set timer to
                   re-enable. */
                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                iowrite32(0x0012, ioaddr + CSR11);
            }
            break;
        }

        work_count--;
        if (work_count == 0)
            break;

        csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
        if (rxd)
            csr5 &= ~RxPollInt;
    } while ((csr5 & (TxNoBuf |
                      TxDied |
                      TxIntr |
                      TimerInt |
                      /* Abnormal intr. */
                      RxDied |
                      TxFIFOUnderflow |
                      TxJabber |
                      TPLnkFail |
                      SystemError)) != 0);
#else
    } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

    tulip_refill_rx(dev);

    /* check if the card is in suspend mode */
    entry = tp->dirty_rx % RX_RING_SIZE;
    if (tp->rx_buffers[entry].skb == NULL) {
        if (tulip_debug > 1)
            printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
        if (tp->chip_id == LC82C168) {
            iowrite32(0x00, ioaddr + CSR7);
            mod_timer(&tp->timer, RUN_AT(HZ/50));
        } else {
            if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                if (tulip_debug > 1)
                    printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
                iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                          ioaddr + CSR7);
                iowrite32(TimerInt, ioaddr + CSR5);
                iowrite32(12, ioaddr + CSR11);
                tp->ttimer = 1;
            }
        }
    }
#endif /* CONFIG_TULIP_NAPI */

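    /* CSR8 accumulates missed frames; bit 16 is, to our reading of the
     * 21x4x documentation, the counter-overflow flag, hence the clamp
     * to 0x10000 below. */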
    if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
        tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
    }

    if (tulip_debug > 4)
        printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
               dev->name, ioread32(ioaddr + CSR5));

    return IRQ_HANDLED;
}