/*
	drivers/net/tulip/interrupt.c

	Maintained by Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
	for more information on this driver, or visit the project
	Web page at http://sourceforge.net/projects/tulip/

*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/config.h>
#include <linux/etherdevice.h>


int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_NET_HW_FLOWCONTROL

#define MIT_SIZE 15
unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt
	   We use only RX mitigation; other techniques are used for
	   TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/

	0x0,		/* IM disabled */
	0x80150000,	/* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000,
	0x80270000,
	0x80370000,
	0x80490000,
	0x80590000,
	0x80690000,
	0x807B0000,
	0x808B0000,
	0x809D0000,
	0x80AD0000,
	0x80BD0000,
	0x80CF0000,
	0x80DF0000,
//	0x80FF0000	/* RX time = 16, RX pkts = 7, CM = 1 */
	0x80F10000	/* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
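
#if 0	/* Illustrative sketch, not part of the original driver.  It shows
	 * how a CSR11 word with the bit layout documented above could be
	 * composed; MIT_CSR11 is a hypothetical helper, assuming the 21143
	 * field positions listed in the comment. */
#define MIT_CSR11(cs, tx_tm, tx_pk, rx_tm, rx_pk, cm)			\
	(((unsigned int)(cs) << 31) | ((tx_tm) << 27) | ((tx_pk) << 24) | \
	 ((rx_tm) << 20) | ((rx_pk) << 17) | ((cm) << 16))
/* e.g. MIT_CSR11(1, 0, 0, 1, 2, 1) == 0x80150000, the second table entry */
#endif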


int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry;
	int refilled = 0;

	/* Refill the Rx ring buffers. */
	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
						 PCI_DMA_FROMDEVICE);
			tp->rx_buffers[entry].mapping = mapping;

			skb->dev = dev;			/* Mark as being used by this device. */
			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((inl(dev->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			outl(0x01, dev->base_addr + CSR2);
		}
	}
	return refilled;
}
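
/*
 * Worked example of the ring arithmetic above (illustrative numbers):
 * with RX_RING_SIZE == 128, cur_rx == 130 and dirty_rx == 127, the loop
 * visits dirty_rx values 127, 128 and 129, i.e. ring entries 127, 0 and 1,
 * and hands each refilled descriptor back to the chip via DescOwned.
 */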


static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

#ifdef CONFIG_NET_HW_FLOWCONTROL
	int drop = 0, mit_sel = 0;

	/* One buffer is needed for mit activation; or it might be a
	   bug in the ring buffer code; check later -- JHS */

	if (rx_work_limit >= RX_RING_SIZE)
		rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
		       tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
			       dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		/*
		 * Omit the four octet CRC from the length.
		 * (May not be considered valid until we have
		 * checked status for RxLengthOver2047 bits)
		 */
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/*
		 * Maximum pkt_len is 1518 (1514 + vlan header)
		 * Anything higher than this is always invalid
		 * regardless of RxLengthOver2047 bits
		 */

		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt
		    || pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
						       "spanned multiple buffers, status %8.8x!\n",
						       dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				tp->stats.rx_errors++; /* end of a packet. */

				if (pkt_len > 1518 || (status & RxDescRunt))
					tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

#ifdef CONFIG_NET_HW_FLOWCONTROL
			drop = atomic_read(&netdev_dropping);
			if (drop)
				goto throttle;
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
			    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_NET_HW_FLOWCONTROL
			mit_sel =
#endif
			netif_rx(skb);

#ifdef CONFIG_NET_HW_FLOWCONTROL
			switch (mit_sel) {
			case NET_RX_SUCCESS:
			case NET_RX_CN_LOW:
			case NET_RX_CN_MOD:
				break;

			case NET_RX_CN_HIGH:
				rx_work_limit -= NET_RX_CN_HIGH; /* additional */
				break;
			case NET_RX_DROP:
				rx_work_limit = -1;
				break;
			default:
				printk("unknown feedback return code %d\n", mit_sel);
				break;
			}

			drop = atomic_read(&netdev_dropping);
			if (drop) {
throttle:
				rx_work_limit = -1;
				mit_sel = NET_RX_DROP;

				if (tp->fc_bit) {
					long ioaddr = dev->base_addr;

					/* disable Rx & RxNoBuf ints. */
					outl(tulip_tbl[tp->chip_id].valid_intrs & RX_A_NBF_STOP, ioaddr + CSR7);
					set_bit(tp->fc_bit, &netdev_fc_xoff);
				}
			}
#endif
			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
#ifdef CONFIG_NET_HW_FLOWCONTROL

	/* We use this simplistic scheme for IM.  It has been proven by
	   real life installations.  We could keep IM enabled
	   continuously, but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note: We only use the min and max (0, 15) settings from mit_table */


	if (tp->flags & HAS_INTR_MITIGATION) {
		if ((received > 1 || mit_sel == NET_RX_DROP)
		    && tp->mit_sel != 15) {
			tp->mit_sel = 15;
			tp->mit_change = 1;	/* Force IM change */
		}
		if ((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0) {
			tp->mit_sel = 0;
			tp->mit_change = 1;	/* Force IM change */
		}
	}

	return RX_RING_SIZE + 1;	/* maxrx+1 */
#else
	return received;
#endif
}
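
#if 0	/* Illustrative sketch only, never compiled.  It restates the
	 * ON/OFF mitigation hysteresis at the end of tulip_rx() as a
	 * standalone helper; tulip_update_mit is a hypothetical name,
	 * assuming the same tp->mit_sel convention (0 = IM off, 15 = max
	 * RX mitigation from mit_table). */
static void tulip_update_mit(struct tulip_private *tp, int received, int dropping)
{
	int want = (received > 1 || dropping) ? 15 : 0;

	if (tp->mit_sel != want) {
		tp->mit_sel = want;
		tp->mit_change = 1;	/* the irq handler rewrites CSR11 */
	}
}
#endif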

static inline void phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	int csr12 = inl(dev->base_addr + CSR12) & 0xff;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		outl(csr12 | 0x02, dev->base_addr + CSR12);
		tp->csr12_shadow = csr12;
		/* do link change stuff */
		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);
		/* clear irq ack bit */
		outl(csr12 & ~0x02, dev->base_addr + CSR12);
	}
#endif
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int csr5;
	int entry;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
	unsigned int work_count = tulip_max_interrupt_work;

	/* Let's see whether the interrupt really is for us */
	csr5 = inl(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		phy_interrupt (dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return;

	tp->nir++;

	do {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outl(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (tulip_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
			       dev->name, csr5, inl(dev->base_addr + CSR5));

		if (csr5 & (RxIntr | RxNoBuf)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if ((!tp->fc_bit) ||
			    (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
#endif
				rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						pci_unmap_single(tp->pdev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 PCI_DMA_TODEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						       dev->name, status);
#endif
					tp->stats.tx_errors++;
					if (status & 0x4104) tp->stats.tx_aborted_errors++;
					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
					if (status & 0x0200) tp->stats.tx_window_errors++;
					if (status & 0x0002) tp->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						tp->stats.tx_heartbeat_errors++;
				} else {
					tp->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					tp->stats.collisions += (status >> 3) & 15;
					tp->stats.tx_packets++;
				}

				pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 PCI_DMA_TODEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
				       dev->name, dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					printk(KERN_WARNING "%s: The transmitter stopped."
					       " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
					       dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber) tp->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				outl(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					outl(tp->mc_filter[0], ioaddr + 0xAC);
					outl(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
#ifdef CONFIG_NET_HW_FLOWCONTROL
				if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
					tp->stats.rx_errors++;
					tulip_start_rxtx(tp);
				}
#else
				tp->stats.rx_errors++;
				tulip_start_rxtx(tp);
#endif
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SytemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
				       dev->name, tp->nir, error);
			}
			/* Clear all error sources, including undocumented ones! */
			outl(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
				       dev->name, csr5);
#ifdef CONFIG_NET_HW_FLOWCONTROL
			if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
				if (net_ratelimit()) printk("BUG!! enabling interrupt when FC off (timerintr.)\n");
#endif
			outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				printk(KERN_WARNING "%s: Too much work during an interrupt, "
				       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n",
				       dev->name, csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			outl(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
				if (tp->mit_change) {
					outl(mit_table[tp->mit_sel], ioaddr + CSR11);
					tp->mit_change = 0;
				}
#else
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				outl(0x8b240000, ioaddr + CSR11);
#endif
			} else if (tp->chip_id == LC82C168) {
				/* the LC82C168 doesn't have a hw timer. */
				outl(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
#ifndef CONFIG_NET_HW_FLOWCONTROL
				outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				outl(0x0012, ioaddr + CSR11);
#endif
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = inl(ioaddr + CSR5);
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
			       dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			outl(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n",
					       dev->name, tp->nir);
				outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
				     ioaddr + CSR7);
				outl(TimerInt, ioaddr + CSR5);
				outl(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}

	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
		       dev->name, inl(ioaddr + CSR5));

}

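#if 0	/* Illustrative sketch, not part of this file: roughly how a
	 * 2.4-era open routine (cf. tulip_open() in tulip_core.c) hooks
	 * this handler up.  example_open is a hypothetical name and the
	 * surrounding open-path details are elided/assumed. */
static int example_open(struct net_device *dev)
{
	/* SA_SHIRQ: the PCI interrupt line may be shared with other devices */
	if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))
		return -EAGAIN;
	/* ... ring setup, chip start, netif_start_queue(dev) ... */
	return 0;
}
#endif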