/*
 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
 * copyrights (c) 1994,1995,1996 by M.Hipp
 *
 * This driver can handle the old ni6510 board and the newer ni6510
 * EtherBlaster. (It probably also works with any fully NE2100-compatible
 * card.)
 *
 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
 *
 * This is an extension to the Linux operating system, and is covered by the
 * same GNU General Public License that covers the Linux-kernel.
 *
 * comments/bugs/suggestions can be sent to:
 *   Michael Hipp
 *   email: hippm@informatik.uni-tuebingen.de
 *
 * sources:
 *   some things are taken from the 'ni6510-packet-driver for dos by Russ Nelson'
 *   and from the original drivers by D. Becker
 *
 * known problems:
 *   - on some PCI boards (including my own) the card/board/ISA-bridge has
 *     problems with bus-master DMA. This results in lots of overruns.
 *     It may help to '#define RCV_PARANOIA_CHECK' or to #undef
 *     the XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
 *     Or just play with your BIOS options to optimize ISA-DMA access.
 *     You may also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
 *     defines -> please report your experience in that case
 *   - Harald reported that on ASUS SP3G mainboards you should use
 *     the 'optimal settings' from the user's manual on page 3-12!
 *
 * credits:
 *   thanks to Jason Sullivan for sending me a ni6510 card!
 *   lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
 *
 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
 *   average: FTP -> 8384421 bytes received in 8.5 seconds
 *            (no RCV_VIA_SKB, no XMT_VIA_SKB, PARANOIA_CHECK, 4 XMIT BUFS, 8 RCV_BUFFS)
 *   peak:    FTP -> 8384421 bytes received in 7.5 seconds
 *            (RCV_VIA_SKB, XMT_VIA_SKB, no PARANOIA_CHECK, 1(!) XMIT BUF, 16 RCV BUFFS)
 */

/*
 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
 * 96.Sept.29: virt_to_bus stuff added for new memory model
 * 96.April.29: added Harald Koenig's patches (MH)
 * 96.April.13: enhanced error handling .. more tests (MH)
 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
 * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
 *            hopefully no more 16MB limit
 *
 * 95.Nov.18: multicast tweaked (AC).
 *
 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted packet), ni65_send_packet (p->lock) (MH)
 *
 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

#include "ni65.h"

/*
 * the current settings give acceptable performance.
 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
 * the header of this file.
 * 'invert' the defines for maximum performance. This may cause DMA problems
 * on some boards (e.g. on my ASUS SP3G).
 */
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK

#define MID_PERFORMANCE

#if defined( LOW_PERFORMANCE )
static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
static int isa0=5,isa1=5,csr80=0x2810;
#else	/* high performance */
static int isa0=4,isa1=4,csr80=0x0017;
#endif
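
/*
 * Note (added): isa0/isa1 are the ISA bus DMA read/write timings in
 * multiples of 50 ns (see ni65_set_performance()), and csr80 holds the
 * FIFO watermarks written to CSR80. The LOW_PERFORMANCE profile uses the
 * longest (most conservative) bus timings; the right trade-off for a given
 * board is best found by experiment, as the header comment suggests.
 */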

/*
 * a few card/vendor specific defines
 */
#define NI65_ID0	0x00
#define NI65_ID1	0x55
#define NI65_EB_ID0	0x52
#define NI65_EB_ID1	0x44
#define NE2100_ID0	0x57
#define NE2100_ID1	0x57

#define PORT p->cmdr_addr

/*
 * buffer configuration
 */
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif

#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif
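
/*
 * Note (added): the LANCE init block encodes the ring length as
 * log2(number of descriptors) in bits 31..29 of the ring base address,
 * hence the masks above: 16 RMDs -> 4<<29 = 0x80000000, 4 TMDs ->
 * 2<<29 = 0x40000000. RMDNUM and TMDNUM must stay powers of two.
 */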

/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544

/*
 * lance register defines
 */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_BUSIF   0x06

/*
 * to access the lance/am7990-regs, you have to write
 * reg-number into L_ADDRREG, then you can access it using L_DATAREG
 */
#define CSR0  0x00
#define CSR1  0x01
#define CSR2  0x02
#define CSR3  0x03
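
/*
 * Illustration (added): reading CSR0 by hand is the same two-step
 * sequence the readreg()/writereg() macros below expand to:
 *
 *	outw(CSR0, PORT + L_ADDRREG);	// select the register (RAP)
 *	val = inw(PORT + L_DATAREG);	// then read it (RDP)
 */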

#define INIT_RING_BEFORE_START	0x1
#define FULL_RESET_ON_ERROR	0x2

#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
			   outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
		      inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif

static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };

static struct card {
	unsigned char id0,id1;
	short id_offset;
	short total_size;
	short cmd_offset;
	short addr_offset;
	unsigned char *vendor_id;
	char *cardname;
	unsigned long config;
} cards[] = {
	{
		.id0	     = NI65_ID0,
		.id1	     = NI65_ID1,
		.id_offset   = 0x0e,
		.total_size  = 0x10,
		.cmd_offset  = 0x0,
		.addr_offset = 0x8,
		.vendor_id   = ni_vendor,
		.cardname    = "ni6510",
		.config	     = 0x1,
	},
	{
		.id0	     = NI65_EB_ID0,
		.id1	     = NI65_EB_ID1,
		.id_offset   = 0x0e,
		.total_size  = 0x18,
		.cmd_offset  = 0x10,
		.addr_offset = 0x0,
		.vendor_id   = ni_vendor,
		.cardname    = "ni6510 EtherBlaster",
		.config	     = 0x2,
	},
	{
		.id0	     = NE2100_ID0,
		.id1	     = NE2100_ID1,
		.id_offset   = 0x0e,
		.total_size  = 0x18,
		.cmd_offset  = 0x10,
		.addr_offset = 0x0,
		.vendor_id   = NULL,
		.cardname    = "generic NE2100",
		.config	     = 0x0,
	},
};
#define NUM_CARDS 3

struct priv
{
	struct rmd rmdhead[RMDNUM];
	struct tmd tmdhead[TMDNUM];
	struct init_block ib;
	int rmdnum;
	int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
	struct sk_buff *recv_skb[RMDNUM];
#else
	void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
	struct sk_buff *tmd_skb[TMDNUM];
#endif
	void *tmdbounce[TMDNUM];
	int tmdbouncenum;
	int lock,xmit_queued;

	void *self;
	int cmdr_addr;
	int cardno;
	int features;
	spinlock_t ring_lock;
};

static int ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
				    struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static void set_multicast_list(struct net_device *dev);

static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */

static int debuglevel = 1;

/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
	writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

	if( !(cards[p->cardno].config & 0x02) )
		return;

	outw(80,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) != 80)
		return;

	writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
	outw(0,PORT+L_ADDRREG);
	outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
	outw(1,PORT+L_ADDRREG);
	outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */

	outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
}

/*
 * open interface (up)
 */
static int ni65_open(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	int irqval = request_irq(dev->irq, ni65_interrupt,0,
				 cards[p->cardno].cardname,dev);
	if (irqval) {
		printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
		       dev->name,dev->irq, irqval);
		return -EAGAIN;
	}

	if(ni65_lance_reinit(dev))
	{
		netif_start_queue(dev);
		return 0;
	}
	else
	{
		free_irq(dev->irq,dev);
		return -EAGAIN;
	}
}

/*
 * close interface (down)
 */
static int ni65_close(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
	{
		int i;
		for(i=0;i<TMDNUM;i++)
		{
			if(p->tmd_skb[i]) {
				dev_kfree_skb(p->tmd_skb[i]);
				p->tmd_skb[i] = NULL;
			}
		}
	}
#endif
	free_irq(dev->irq,dev);
	return 0;
}

static void cleanup_card(struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	disable_dma(dev->dma);
	free_dma(dev->dma);
	release_region(dev->base_addr, cards[p->cardno].total_size);
	ni65_free_buffer(p);
}

/* set io, irq and dma here, or set them when loading the module via insmod */
static int irq;
static int io;
static int dma;

/*
 * Probe The Card (not the lance-chip)
 */
struct net_device * __init ni65_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
	const int *port;
	int err = 0;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
		irq = dev->irq;
		dma = dev->dma;
	} else {
		dev->base_addr = io;
	}

	if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
		err = ni65_probe1(dev, dev->base_addr);
	} else if (dev->base_addr > 0) { /* Don't probe at all. */
		err = -ENXIO;
	} else {
		for (port = ports; *port && ni65_probe1(dev, *port); port++)
			;
		if (!*port)
			err = -ENODEV;
	}
	if (err)
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return dev;
out1:
	cleanup_card(dev);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static const struct net_device_ops ni65_netdev_ops = {
	.ndo_open		= ni65_open,
	.ndo_stop		= ni65_close,
	.ndo_start_xmit		= ni65_send_packet,
	.ndo_tx_timeout		= ni65_timeout,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * this is the real card probe ..
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
	int i,j;
	struct priv *p;
	unsigned long flags;

	dev->irq = irq;
	dev->dma = dma;

	for(i=0;i<NUM_CARDS;i++) {
		int match = 1;

		if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
			continue;
		if(cards[i].id_offset >= 0) {
			if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
			   inb(ioaddr+cards[i].id_offset+1) != cards[i].id1)
				match = 0;
		}
		if(match && cards[i].vendor_id) {
			for(j=0;j<3;j++)
				if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
					match = 0;
		}
		if(!match) {
			release_region(ioaddr, cards[i].total_size);
			continue;
		}
		break;
	}
	if(i == NUM_CARDS)
		return -ENODEV;

	for(j=0;j<6;j++)
		dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

	if( (j=ni65_alloc_buffer(dev)) < 0) {
		release_region(ioaddr, cards[i].total_size);
		return j;
	}
	p = dev->ml_priv;
	p->cmdr_addr = ioaddr + cards[i].cmd_offset;
	p->cardno = i;
	spin_lock_init(&p->ring_lock);

	printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (j=readreg(CSR0)) != 0x4) {
		printk("failed.\n");
		printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	outw(88,PORT+L_ADDRREG);
	if(inw(PORT+L_ADDRREG) == 88) {
		unsigned long v;
		v = inw(PORT+L_DATAREG);
		v <<= 16;
		outw(89,PORT+L_ADDRREG);
		v |= inw(PORT+L_DATAREG);
		printk("Version %#08lx, ",v);
		p->features = INIT_RING_BEFORE_START;
	}
	else {
		printk("ancient LANCE, ");
		p->features = 0x0;
	}

	if(test_bit(0,&cards[i].config)) {
		dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
		dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
		printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
	}
	else {
		if(dev->dma == 0) {
			/* 'stuck test' from lance.c */
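			/*
			 * Explanation (added): a free ISA DMA channel is found by
			 * trying each candidate in turn: skip channels that read
			 * as busy in the DMA status registers, put the channel
			 * into cascade mode, let the LANCE fetch its init block
			 * over the bus, and check whether CSR0_IDON gets set.
			 * Only the channel the card is really strapped to will
			 * complete the init.
			 */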
			unsigned long dma_channels =
				((inb(DMA1_STAT_REG) >> 4) & 0x0f)
				| (inb(DMA2_STAT_REG) & 0xf0);
			for(i=1;i<5;i++) {
				int dma = dmatab[i];
				if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
					continue;

				flags=claim_dma_lock();
				disable_dma(dma);
				set_dma_mode(dma,DMA_MODE_CASCADE);
				enable_dma(dma);
				release_dma_lock(flags);

				ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

				flags=claim_dma_lock();
				disable_dma(dma);
				free_dma(dma);
				release_dma_lock(flags);

				if(readreg(CSR0) & CSR0_IDON)
					break;
			}
			if(i == 5) {
				printk("failed.\n");
				printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			dev->dma = dmatab[i];
			printk("DMA %d (autodetected), ",dev->dma);
		}
		else
			printk("DMA %d (assigned), ",dev->dma);

		if(dev->irq < 2)
		{
			unsigned long irq_mask;

			ni65_init_lance(p,dev->dev_addr,0,0);
			irq_mask = probe_irq_on();
			writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
			msleep(20);
			dev->irq = probe_irq_off(irq_mask);
			if(!dev->irq)
			{
				printk("Failed to detect IRQ line!\n");
				ni65_free_buffer(p);
				release_region(ioaddr, cards[p->cardno].total_size);
				return -EAGAIN;
			}
			printk("IRQ %d (autodetected).\n",dev->irq);
		}
		else
			printk("IRQ %d (assigned).\n",dev->irq);
	}

	if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
	{
		printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
		ni65_free_buffer(p);
		release_region(ioaddr, cards[p->cardno].total_size);
		return -EAGAIN;
	}

	dev->base_addr = ioaddr;
	dev->netdev_ops = &ni65_netdev_ops;
	dev->watchdog_timeo = HZ/2;

	return 0; /* everything is OK */
}

/*
 * set lance registers and trigger init
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
	int i;
	u32 pib;

	writereg(CSR0_CLRALL|CSR0_STOP,CSR0);

	for(i=0;i<6;i++)
		p->ib.eaddr[i] = daddr[i];

	for(i=0;i<8;i++)
		p->ib.filter[i] = filter;
	p->ib.mode = mode;

	p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
	p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
	writereg(0,CSR3);	/* busmaster/no word-swap */
	pib = (u32) isa_virt_to_bus(&p->ib);
	writereg(pib & 0xffff,CSR1);
	writereg(pib >> 16,CSR2);

	writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

	for(i=0;i<32;i++)
	{
		mdelay(4);
		if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
			break; /* init ok ? */
	}
}

/*
 * allocate a memory area and check the 16MB border
 * (ISA bus-master DMA can only address the lower 16MB)
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
	struct sk_buff *skb=NULL;
	unsigned char *ptr;
	void *ret;

	if(type) {
		ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
		if(!skb) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
		skb_reserve(skb,2+16);
		skb_put(skb,R_BUF_SIZE);	 /* grab the whole space .. (not necessary) */
		ptr = skb->data;
	}
	else {
		ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
		if(!ret) {
			printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
			return NULL;
		}
	}
	if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
		printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
		if(type)
			kfree_skb(skb);
		else
			kfree(ptr);
		return NULL;
	}
	return ret;
}

/*
 * allocate all memory structures .. send/recv buffers etc ...
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
	unsigned char *ptr;
	struct priv *p;
	int i;

	/*
	 * we need 8-byte-aligned memory ..
	 */
	ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
	if(!ptr)
		return -ENOMEM;

	p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
	memset((char *)p, 0, sizeof(struct priv));
	p->self = ptr;

	for(i=0;i<TMDNUM;i++)
	{
#ifdef XMT_VIA_SKB
		p->tmd_skb[i] = NULL;
#endif
		p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
		if(!p->tmdbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
		if(!p->recv_skb[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#else
		p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
		if(!p->recvbounce[i]) {
			ni65_free_buffer(p);
			return -ENOMEM;
		}
#endif
	}

	return 0; /* everything is OK */
}

/*
 * free buffers and private struct
 */
static void ni65_free_buffer(struct priv *p)
{
	int i;

	if(!p)
		return;

	for(i=0;i<TMDNUM;i++) {
		kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i])
			dev_kfree_skb(p->tmd_skb[i]);
#endif
	}

	for(i=0;i<RMDNUM;i++)
	{
#ifdef RCV_VIA_SKB
		if(p->recv_skb[i])
			dev_kfree_skb(p->recv_skb[i]);
#else
		kfree(p->recvbounce[i]);
#endif
	}
	kfree(p->self);
}


/*
 * stop and (re)start lance .. e.g. after an error
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
	int csr0 = CSR0_INEA;

	writedatareg(CSR0_STOP);

	if(debuglevel > 1)
		printk(KERN_DEBUG "ni65_stop_start\n");

	if(p->features & INIT_RING_BEFORE_START) {
		int i;
#ifdef XMT_VIA_SKB
		struct sk_buff *skb_save[TMDNUM];
#endif
		unsigned long buffer[TMDNUM];
		short blen[TMDNUM];

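		/*
		 * Note (added): restarting the chip resets its ring pointers
		 * to descriptor 0, while the driver's own indices keep running.
		 * The code below therefore retires already-completed TX
		 * descriptors, saves the buffers of the still-pending ones,
		 * and re-queues them from the start of the freshly initialised
		 * ring.
		 */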
		if(p->xmit_queued) {
			while(1) {
				if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
					break;
				p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
				if(p->tmdlast == p->tmdnum)
					break;
			}
		}

		for(i=0;i<TMDNUM;i++) {
			struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
			skb_save[i] = p->tmd_skb[i];
#endif
			buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
			blen[i] = tmdp->blen;
			tmdp->u.s.status = 0x0;
		}

		for(i=0;i<RMDNUM;i++) {
			struct rmd *rmdp = p->rmdhead + i;
			rmdp->u.s.status = RCV_OWN;
		}
		p->tmdnum = p->xmit_queued = 0;
		writedatareg(CSR0_STRT | csr0);

		for(i=0;i<TMDNUM;i++) {
			int num = (i + p->tmdlast) & (TMDNUM-1);
			p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
			p->tmdhead[i].blen = blen[num];
			if(p->tmdhead[i].u.s.status & XMIT_OWN) {
				p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
				p->xmit_queued = 1;
				writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
			}
#ifdef XMT_VIA_SKB
			p->tmd_skb[i] = skb_save[num];
#endif
		}
		p->rmdnum = p->tmdlast = 0;
		if(!p->lock)
			if (p->tmdnum || !p->xmit_queued)
				netif_wake_queue(dev);
		dev->trans_start = jiffies; /* prevent tx timeout */
	}
	else
		writedatareg(CSR0_STRT | csr0);
}

/*
 * init lance (write init-values .. init-buffers) (open-helper)
 */
static int ni65_lance_reinit(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;
	unsigned long flags;

	p->lock = 0;
	p->xmit_queued = 0;

	flags=claim_dma_lock();
	disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
	set_dma_mode(dev->dma,DMA_MODE_CASCADE);
	enable_dma(dev->dma);
	release_dma_lock(flags);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
	if( (i=readreg(CSR0) ) != 0x4)
	{
		printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
		       cards[p->cardno].cardname,(int) i);
		flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
		return 0;
	}

	p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
	for(i=0;i<TMDNUM;i++)
	{
		struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
		if(p->tmd_skb[i]) {
			dev_kfree_skb(p->tmd_skb[i]);
			p->tmd_skb[i] = NULL;
		}
#endif
		tmdp->u.buffer = 0x0;
		tmdp->u.s.status = XMIT_START | XMIT_END;
		tmdp->blen = tmdp->status2 = 0;
	}

	for(i=0;i<RMDNUM;i++)
	{
		struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
		rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
		rmdp->blen = -(R_BUF_SIZE-8); /* the LANCE wants the buffer length as a negative two's-complement value */
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN;
	}

	if(dev->flags & IFF_PROMISC)
		ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
	else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
		ni65_init_lance(p,dev->dev_addr,0xff,0x0);
	else
		ni65_init_lance(p,dev->dev_addr,0x00,0x00);

	/*
	 * ni65_init_lance() sets L_ADDRREG to CSR0.
	 * FROM NOW ON, WE WILL NEVER CHANGE L_ADDRREG: CSR0 IS ALWAYS SELECTED.
	 */

	if(inw(PORT+L_DATAREG) & CSR0_IDON) {
		ni65_set_performance(p);
		/* init OK: start lance, enable interrupts */
		writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
		return 1; /* ->OK */
	}
	printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
	flags=claim_dma_lock();
	disable_dma(dev->dma);
	release_dma_lock(flags);
	return 0; /* ->Error */
}

/*
 * interrupt handler
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id)
{
	int csr0 = 0;
	struct net_device *dev = dev_id;
	struct priv *p;
	int bcnt = 32;

	p = dev->ml_priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

#if 0
		writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
		writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

		if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
			break;

		if(csr0 & CSR0_RINT) /* RECV-int? */
			ni65_recv_intr(dev,csr0);
		if(csr0 & CSR0_TINT) /* XMIT-int? */
			ni65_xmit_intr(dev,csr0);

		if(csr0 & CSR0_ERR)
		{
			if(debuglevel > 1)
				printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
			if(csr0 & CSR0_BABL)
				dev->stats.tx_errors++;
			if(csr0 & CSR0_MISS) {
				int i;
				for(i=0;i<RMDNUM;i++)
					printk("%02x ",p->rmdhead[i].u.s.status);
				printk("\n");
				dev->stats.rx_errors++;
			}
			if(csr0 & CSR0_MERR) {
				if(debuglevel > 1)
					printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
				ni65_stop_start(dev,p);
			}
		}
	}

#ifdef RCV_PARANOIA_CHECK
	{
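		/*
		 * Explanation (added): this guards against the receive ring
		 * getting out of sync with the chip (see 'known problems' in
		 * the file header). If a descriptor other than the driver's
		 * current one has been given back by the chip while the
		 * current one is still chip-owned, the driver's index is
		 * stale; jump to the chip's actual position and process from
		 * there.
		 */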
		int j;
		for(j=0;j<RMDNUM;j++)
		{
			int i, num2;
			for(i=RMDNUM-1;i>0;i--) {
				num2 = (p->rmdnum + i) & (RMDNUM-1);
				if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
					break;
			}

			if(i) {
				int k, num1;
				for(k=0;k<RMDNUM;k++) {
					num1 = (p->rmdnum + k) & (RMDNUM-1);
					if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
						break;
				}
				if(!k)
					break;

				if(debuglevel > 0)
				{
					char buf[256],*buf1;
					buf1 = buf;
					for(k=0;k<RMDNUM;k++) {
						sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
						buf1 += 3;
					}
					*buf1 = 0;
					printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
				}

				p->rmdnum = num1;
				ni65_recv_intr(dev,csr0);
				if((p->rmdhead[num2].u.s.status & RCV_OWN))
					break; /* ok, we are 'in sync' again */
			}
			else
				break;
		}
	}
#endif

	if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
		printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
		ni65_stop_start(dev,p);
	}
	else
		writedatareg(CSR0_INEA);

	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}

/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
	struct priv *p = dev->ml_priv;

	while(p->xmit_queued)
	{
		struct tmd *tmdp = p->tmdhead + p->tmdlast;
		int tmdstat = tmdp->u.s.status;

		if(tmdstat & XMIT_OWN)
			break;

		if(tmdstat & XMIT_ERR)
		{
#if 0
			if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
				printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
			/* checking some errors */
			if(tmdp->status2 & XMIT_RTRY)
				dev->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)
				dev->stats.tx_carrier_errors++;
			if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
				/* this stops the xmitter */
				dev->stats.tx_fifo_errors++;
				if(debuglevel > 0)
					printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
				if(p->features & INIT_RING_BEFORE_START) {
					tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;	/* test: resend this frame */
					ni65_stop_start(dev,p);
					break;	/* no more Xmit processing .. */
				}
				else
					ni65_stop_start(dev,p);
			}
			if(debuglevel > 2)
				printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
			if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
				dev->stats.tx_errors++;
			tmdp->status2 = 0;
		}
		else {
			dev->stats.tx_bytes -= (short)(tmdp->blen); /* blen is negative, so this adds the length */
			dev->stats.tx_packets++;
		}

#ifdef XMT_VIA_SKB
		if(p->tmd_skb[p->tmdlast]) {
			dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
			p->tmd_skb[p->tmdlast] = NULL;
		}
#endif

		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)
			p->xmit_queued = 0;
	}
	netif_wake_queue(dev);
}

/*
 * We have received a packet
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
	struct rmd *rmdp;
	int rmdstat,len;
	int cnt=0;
	struct priv *p = dev->ml_priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
	{
		cnt++;
		if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
		{
			if(!(rmdstat & RCV_ERR)) {
				if(rmdstat & RCV_START)
				{
					dev->stats.rx_length_errors++;
					printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
				}
			}
			else {
				if(debuglevel > 2)
					printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
					       dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
				if(rmdstat & RCV_FRAM)
					dev->stats.rx_frame_errors++;
				if(rmdstat & RCV_OFLO)
					dev->stats.rx_over_errors++;
				if(rmdstat & RCV_CRC)
					dev->stats.rx_crc_errors++;
				if(rmdstat & RCV_BUF_ERR)
					dev->stats.rx_fifo_errors++;
			}
			if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
				dev->stats.rx_errors++;
		}
		else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60) /* strip the 4-byte FCS; 60 is the minimum frame length */
		{
#ifdef RCV_VIA_SKB
			struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
			if (skb)
				skb_reserve(skb,16);
#else
			struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
			if(skb)
			{
				skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
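				/*
				 * Note (added): if the freshly allocated skb lies
				 * above the 16MB ISA-DMA limit it cannot serve as
				 * a future receive buffer, so the data is copied
				 * out; otherwise it is swapped with the ring
				 * buffer and the filled buffer is handed up
				 * without copying.
				 */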
				if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
					skb_put(skb,len);
					skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
				}
				else {
					struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
					skb_put(skb,R_BUF_SIZE);
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
					skb = skb1;
					skb_trim(skb,len);
				}
#else
				skb_put(skb,len);
				skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
			}
			else
			{
				printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
				dev->stats.rx_dropped++;
			}
		}
		else {
			printk(KERN_INFO "%s: received runt packet\n",dev->name);
			dev->stats.rx_errors++;
		}
		rmdp->blen = -(R_BUF_SIZE-8);
		rmdp->mlen = 0;
		rmdp->u.s.status = RCV_OWN; /* change owner */
		p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
		rmdp = p->rmdhead + p->rmdnum;
	}
}

/*
 * kick xmitter ..
 */

static void ni65_timeout(struct net_device *dev)
{
	int i;
	struct priv *p = dev->ml_priv;

	printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
	for(i=0;i<TMDNUM;i++)
		printk("%02x ",p->tmdhead[i].u.s.status);
	printk("\n");
	ni65_lance_reinit(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * Send a packet
 */

static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct priv *p = dev->ml_priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	{
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		struct tmd *tmdp;
		unsigned long flags;

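		/*
		 * Note (added): skb data above the 16MB ISA-DMA limit cannot
		 * be DMA'd directly, so it is copied into one of the
		 * pre-allocated bounce buffers below 16MB; otherwise (with
		 * XMT_VIA_SKB) the skb itself is handed to the chip and freed
		 * after transmission.
		 */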
#ifdef XMT_VIA_SKB
		if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif

			skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
						  skb->len > T_BUF_SIZE ? T_BUF_SIZE :
								  skb->len);
			if (len > skb->len)
				memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
			dev_kfree_skb (skb);

			spin_lock_irqsave(&p->ring_lock, flags);
			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
			p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
		}
		else {
			spin_lock_irqsave(&p->ring_lock, flags);

			tmdp = p->tmdhead + p->tmdnum;
			tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
			p->tmd_skb[p->tmdnum] = skb;
		}
#endif
		tmdp->blen = -len; /* the LANCE wants the length as a negative two's-complement value */

		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
		writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

		p->xmit_queued = 1;
		p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

		if(p->tmdnum != p->tmdlast)
			netif_wake_queue(dev);

		p->lock = 0;

		spin_unlock_irqrestore(&p->ring_lock, flags);
	}

	return NETDEV_TX_OK;
}

static void set_multicast_list(struct net_device *dev)
{
	if(!ni65_lance_reinit(dev))
		printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
	netif_wake_queue(dev);
}

#ifdef MODULE
static struct net_device *dev_ni65;

module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
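
/*
 * Example (added; the values are just an illustration, use your card's
 * actual settings or rely on autodetection):
 *
 *	modprobe ni65 io=0x360 irq=9 dma=5
 */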

int __init init_module(void)
{
	dev_ni65 = ni65_probe(-1);
	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_ni65);
	cleanup_card(dev_ni65);
	free_netdev(dev_ni65);
}
#endif /* MODULE */

MODULE_LICENSE("GPL");