/* $Id$
 * bagetlance.c: Ethernet driver for VME Lance cards on Baget/MIPS
 * This code was taken and adapted from linux/drivers/net/atarilance.c
 * See that file for author info
 *
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */

/*
 * Driver code for Baget/Lance taken from atarilance.c, which also
 * works well in the Besta case. The most significant changes made here
 * relate to 16-bit-only access to the A24 space.
 */

static char *version = "bagetlance.c: v1.1 11/10/98\n";

#include <linux/module.h>

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>

#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/baget/baget.h>

#define BAGET_LANCE_IRQ  BAGET_IRQ_MASK(0xdf)

/*
 * Define the following if you don't need 16-bit-only access to Lance memory
 * (normally BAGET needs it)
 */
#undef NORMAL_MEM_ACCESS

/* Debug level:
 * 0 = silent, print only serious errors
 * 1 = normal, print error messages
 * 2 = debug, print debug info
 * 3 = debug, print even more debug info (packet data)
 */

#define LANCE_DEBUG 1

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
MODULE_PARM(lance_debug, "i");
MODULE_PARM_DESC(lance_debug, "Lance debug level (0-3)");
MODULE_LICENSE("GPL");
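
/* A minimal usage sketch (assuming the module object is built as
 * bagetlance.o): "insmod bagetlance.o lance_debug=2" should select debug
 * level 2 at load time through the MODULE_PARM declaration above.
 */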

/* Print debug messages on probing? */
#undef LANCE_DEBUG_PROBE

#define DPRINTK(n,a) \
    do { \
        if (lance_debug >= n) \
            printk a; \
    } while( 0 )

#ifdef LANCE_DEBUG_PROBE
# define PROBE_PRINT(a) printk a
#else
# define PROBE_PRINT(a)
#endif
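
/* Example of the double-parenthesis calling convention used throughout this
 * driver: DPRINTK( 2, ( "%s: csr0=%04x\n", dev->name, DREG )); -- the inner
 * parentheses carry the complete printk() argument list, so the macro gets
 * by without variadic-macro support.
 */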

/* These define the number of Rx and Tx buffers as log2. (Only powers
 * of two are valid.)
 * Many more Rx buffers (32) are reserved than Tx buffers (8), since receiving
 * is more time-critical than sending and packets may have to remain in the
 * board's memory when main memory is low.
 */

/* Baget Lance has 64K on-board memory, so it looks like we can't increase
   the buffer count (40*1.5K is about 64K) */

#define TX_LOG_RING_SIZE    3
#define RX_LOG_RING_SIZE    5

/* These are the derived values */

#define TX_RING_SIZE        (1 << TX_LOG_RING_SIZE)
#define TX_RING_LEN_BITS    (TX_LOG_RING_SIZE << 5)
#define TX_RING_MOD_MASK    (TX_RING_SIZE - 1)

#define RX_RING_SIZE        (1 << RX_LOG_RING_SIZE)
#define RX_RING_LEN_BITS    (RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK    (RX_RING_SIZE - 1)
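
/* Worked out for the sizes chosen above: TX_RING_SIZE == 8, TX_RING_LEN_BITS
 * == 3 << 5 == 0x60, TX_RING_MOD_MASK == 7; RX_RING_SIZE == 32,
 * RX_RING_LEN_BITS == 5 << 5 == 0xa0, RX_RING_MOD_MASK == 31. The << 5 puts
 * the log2 ring size into the top three bits of the length byte that the
 * init-block setup below stores in 'len' or packs into the high byte of
 * 'len_adr_hi'.
 */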

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
    volatile unsigned short base;       /* Low word of base addr */
#ifdef NORMAL_MEM_ACCESS
    volatile unsigned char  flag;
    unsigned char           base_hi;    /* High word of base addr (unused) */
#else
    /* With 16-bit-only access (the normal Baget case), the two 8-bit fields
       above are joined into one short, and the following macros replace
       direct access to them. */
#define GET_FLAG(h)   (((h)->flag_base_hi >> 8) & 0xff)
#define SET_FLAG(h,f) (h)->flag_base_hi = ((h)->flag_base_hi & 0xff) | \
                                          (((unsigned)(f)) << 8)
    volatile unsigned short flag_base_hi;
#endif
    volatile short          buf_length; /* This length is 2s complement! */
    volatile short          msg_length; /* This length is "normal". */
};
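
/* Packing sketch for the 16-bit-only case: with a hypothetical flag_base_hi
 * of 0x80e0, GET_FLAG() yields 0x80 (the OWN bit set, i.e. the chip owns the
 * descriptor) while the low byte 0xe0 is the untouched base_hi value;
 * SET_FLAG(h, 0) rewrites only the high byte and leaves base_hi alone.
 */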


struct lance_tx_head {
    volatile unsigned short base;       /* Low word of base addr */
#ifdef NORMAL_MEM_ACCESS
    /* See comments above about 8-bit-access Baget A24-space problems */
    volatile unsigned char  flag;
    unsigned char           base_hi;    /* High word of base addr (unused) */
#else
    volatile unsigned short flag_base_hi;
#endif
    volatile short          length;     /* Length is 2s complement! */
    volatile short          misc;
};

struct ringdesc {
    volatile unsigned short adr_lo;     /* Low 16 bits of address */
#ifdef NORMAL_MEM_ACCESS
    /* See comments above about 8-bit-access Baget A24-space problems */
    unsigned char           len;        /* Length bits */
    unsigned char           adr_hi;     /* High 8 bits of address (unused) */
#else
    volatile unsigned short len_adr_hi;
#endif
};

/* The LANCE initialization block, described in the databook. */
struct lance_init_block {
    unsigned short  mode;       /* Pre-set mode */
    unsigned char   hwaddr[6];  /* Physical ethernet address */
    unsigned        filter[2];  /* Multicast filter (unused). */
    /* Receive and transmit ring base, along with length bits. */
    struct ringdesc rx_ring;
    struct ringdesc tx_ring;
};

/* The whole layout of the Lance shared memory */
struct lance_memory {
    struct lance_init_block init;
    struct lance_tx_head    tx_head[TX_RING_SIZE];
    struct lance_rx_head    rx_head[RX_RING_SIZE];
    char                    packet_area[0]; /* packet data follows after the
                                             * init block and the ring
                                             * descriptors and is located
                                             * at runtime */
};
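
/* Rough memory budget (a back-of-the-envelope check of the 64K remark above,
 * assuming 4-byte "unsigned" and no padding): the init block is about 24
 * bytes, the 8 Tx plus 32 Rx descriptors take 40 * 8 = 320 bytes, and the 40
 * packet buffers take 40 * 1544 = 61760 bytes, i.e. roughly 62 KB of the
 * 64 KB on-board RAM.
 */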

/* RieblCard specifics:
 * The original TOS driver for these cards reserves the area from offset
 * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
 * Ethernet address there, and the magic for verifying the data's validity.
 * The reserved area isn't touched by packet buffers. Furthermore, offset
 * 0xfffe is reserved for the interrupt vector number.
 */
#define RIEBL_RSVD_START    0xee70
#define RIEBL_RSVD_END      0xeec0
#define RIEBL_MAGIC         0x09051990
#define RIEBL_MAGIC_ADDR    ((unsigned long *)(((char *)MEM) + 0xee8a))
#define RIEBL_HWADDR_ADDR   ((unsigned char *)(((char *)MEM) + 0xee8e))
#define RIEBL_IVEC_ADDR     ((unsigned short *)(((char *)MEM) + 0xfffe))

/* This is a default address for the old RieblCards without a battery
 * that have no ethernet address at boot time. 00:00:36:04 is the
 * prefix for Riebl cards, the 00:00 at the end is arbitrary.
 */

static unsigned char OldRieblDefHwaddr[6] = {
    0x00, 0x00, 0x36, 0x04, 0x00, 0x00
};

/* I/O registers of the Lance chip */

struct lance_ioreg {
/* base+0x0 */  volatile unsigned short data;
/* base+0x2 */  volatile unsigned short addr;
                unsigned char           _dummy1[3];
/* base+0x7 */  volatile unsigned char  ivec;
                unsigned char           _dummy2[5];
/* base+0xd */  volatile unsigned char  eeprom;
                unsigned char           _dummy3;
/* base+0xf */  volatile unsigned char  mem;
};

/* Types of boards this driver supports */

enum lance_type {
    OLD_RIEBL,  /* old Riebl card without battery */
    NEW_RIEBL,  /* new Riebl card with battery */
    PAM_CARD    /* PAM card with EEPROM */
};

static char *lance_names[] = {
    "Riebl-Card (without battery)",
    "Riebl-Card (with battery)",
    "PAM intern card"
};

/* The driver's private device structure */

struct lance_private {
    enum lance_type     cardtype;
    struct lance_ioreg  *iobase;
    struct lance_memory *mem;
    int                 cur_rx, cur_tx; /* The next free ring entry */
    int                 dirty_tx;       /* Ring entries to be freed. */
    /* copy function */
    void                *(*memcpy_f)( void *, const void *, size_t );
    struct net_device_stats stats;
    /* These two must be longs for set_bit() */
    long                tx_full;
    long                lock;
};

/* I/O register access macros */

#define MEM     lp->mem
#define DREG    IO->data
#define AREG    IO->addr
#define REGA(a) ( AREG = (a), DREG )

/* Definitions for packet buffer access: */
#define PKT_BUF_SZ      1544
/* Get the address of a packet buffer corresponding to a given buffer head */
#define PKTBUF_ADDR(head)   (((unsigned char *)(MEM)) + (head)->base)
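
/* Access pattern implied by the macros above: "REGA( CSR0 ) = CSR0_STOP"
 * first writes CSR0 to the LANCE address port (AREG, the register select)
 * and then writes CSR0_STOP to the data port (DREG); likewise
 * "x = REGA( CSR0 )" selects CSR0 and reads it. After a REGA() the address
 * port stays set, so plain DREG accesses keep hitting the same CSR.
 */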

/* Possible memory/IO addresses for probing */

struct lance_addr {
    unsigned long memaddr;
    unsigned long ioaddr;
    int           slow_flag;
} lance_addr_list[] = {
    { BAGET_LANCE_MEM_BASE, BAGET_LANCE_IO_BASE, 1 }    /* Baget Lance */
};

#define N_LANCE_ADDR    (sizeof(lance_addr_list)/sizeof(*lance_addr_list))


#define LANCE_HI_BASE   (0xff & (BAGET_LANCE_MEM_BASE >> 16))
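
/* LANCE_HI_BASE is the high byte of the 24-bit bus address under which the
 * chip itself reaches the shared memory; it is stored into the base_hi /
 * adr_hi positions of the descriptors and the init block below, while the
 * low 16 bits are taken from offsets inside struct lance_memory (which
 * presumes the shared memory window starts on a 64K boundary).
 */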

/* Definitions for the Lance */

/* tx_head flags */
#define TMD1_ENP        0x01    /* end of packet */
#define TMD1_STP        0x02    /* start of packet */
#define TMD1_DEF        0x04    /* deferred */
#define TMD1_ONE        0x08    /* one retry needed */
#define TMD1_MORE       0x10    /* more than one retry needed */
#define TMD1_ERR        0x40    /* error summary */
#define TMD1_OWN        0x80    /* ownership (set: chip owns) */

#define TMD1_OWN_CHIP   TMD1_OWN
#define TMD1_OWN_HOST   0

/* tx_head misc field */
#define TMD3_TDR        0x03FF  /* Time Domain Reflectometry counter */
#define TMD3_RTRY       0x0400  /* failed after 16 retries */
#define TMD3_LCAR       0x0800  /* carrier lost */
#define TMD3_LCOL       0x1000  /* late collision */
#define TMD3_UFLO       0x4000  /* underflow (late memory) */
#define TMD3_BUFF       0x8000  /* buffering error (no ENP) */

/* rx_head flags */
#define RMD1_ENP        0x01    /* end of packet */
#define RMD1_STP        0x02    /* start of packet */
#define RMD1_BUFF       0x04    /* buffer error */
#define RMD1_CRC        0x08    /* CRC error */
#define RMD1_OFLO       0x10    /* overflow */
#define RMD1_FRAM       0x20    /* framing error */
#define RMD1_ERR        0x40    /* error summary */
#define RMD1_OWN        0x80    /* ownership (set: chip owns) */

#define RMD1_OWN_CHIP   RMD1_OWN
#define RMD1_OWN_HOST   0

/* register names */
#define CSR0    0   /* mode/status */
#define CSR1    1   /* init block addr (low) */
#define CSR2    2   /* init block addr (high) */
#define CSR3    3   /* misc */
#define CSR8    8   /* address filter */
#define CSR15   15  /* promiscuous mode */

/* CSR0 */
/* (R=readable, W=writeable, S=set on write, C=clear on write) */
#define CSR0_INIT   0x0001  /* initialize (RS) */
#define CSR0_STRT   0x0002  /* start (RS) */
#define CSR0_STOP   0x0004  /* stop (RS) */
#define CSR0_TDMD   0x0008  /* transmit demand (RS) */
#define CSR0_TXON   0x0010  /* transmitter on (R) */
#define CSR0_RXON   0x0020  /* receiver on (R) */
#define CSR0_INEA   0x0040  /* interrupt enable (RW) */
#define CSR0_INTR   0x0080  /* interrupt active (R) */
#define CSR0_IDON   0x0100  /* initialization done (RC) */
#define CSR0_TINT   0x0200  /* transmitter interrupt (RC) */
#define CSR0_RINT   0x0400  /* receiver interrupt (RC) */
#define CSR0_MERR   0x0800  /* memory error (RC) */
#define CSR0_MISS   0x1000  /* missed frame (RC) */
#define CSR0_CERR   0x2000  /* carrier error (no heartbeat :-) (RC) */
#define CSR0_BABL   0x4000  /* babble: tx-ed too many bits (RC) */
#define CSR0_ERR    0x8000  /* error (RC) */

/* CSR3 */
#define CSR3_BCON   0x0001  /* byte control */
#define CSR3_ACON   0       /* FIXME: should be 0x0002; ALE control */
#define CSR3_BSWP   0x0004  /* byte swap (1=big endian) */



/***************************** Prototypes *****************************/

static int addr_accessible( volatile void *regp, int wordflag, int writeflag );
static int lance_probe1( struct net_device *dev, struct lance_addr *init_rec );
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
static void lance_interrupt( int irq, void *dev_id, struct pt_regs *fp );
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static struct net_device_stats *lance_get_stats( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );

/************************* End of Prototypes **************************/

/* Network traffic statistics (bytes) */

int lance_stat = 0;

static void update_lance_stat (int len) {
    lance_stat += len;
}

/*
   This function is used to access Baget/Lance memory in order to avoid
   8/32-bit accesses to the VAC A24 space.
   All memcpy calls were changed to this function to avoid DBE problems.
   Don't be confused by the function name -- it is kept from the original code.
*/

void *slow_memcpy( void *dst, const void *src, size_t len )
{
    unsigned long to     = (unsigned long)dst;
    unsigned long from   = (unsigned long)src;
    unsigned long to_end = to + len;

    /* Unaligned flags */

    int odd_from   = from   & 1;
    int odd_to     = to     & 1;
    int odd_to_end = to_end & 1;

    /* Align for 16-bit access first */

    register unsigned short *from_a   = (unsigned short*) (from   & ~1);
    register unsigned short *to_a     = (unsigned short*) (to     & ~1);
    register unsigned short *to_end_a = (unsigned short*) (to_end & ~1);

    /* Cached values -- not loop invariants */

    register unsigned short from_v;
    register unsigned short to_v;

    /* Invariant: from_a and to_a point at, or just before, the byte
       currently being copied */

    if (odd_to) {
        /* First byte unaligned case */
        from_v = *from_a;
        to_v   = *to_a;

        to_v &= ~0xff;
        to_v |=  0xff & (from_v >> (odd_from ? 0 : 8));
        *to_a++ = to_v;

        if (odd_from) from_a++;
    }
    if (odd_from == odd_to) {
        /* Same parity */
        while (to_a + 7 < to_end_a) {
            unsigned long dummy1, dummy2;
            unsigned long reg1, reg2, reg3, reg4;

            __asm__ __volatile__(
                ".set\tnoreorder\n\t"
                ".set\tnoat\n\t"
                "lh\t%2,0(%1)\n\t"
                "nop\n\t"
                "lh\t%3,2(%1)\n\t"
                "sh\t%2,0(%0)\n\t"
                "lh\t%4,4(%1)\n\t"
                "sh\t%3,2(%0)\n\t"
                "lh\t%5,6(%1)\n\t"
                "sh\t%4,4(%0)\n\t"
                "lh\t%2,8(%1)\n\t"
                "sh\t%5,6(%0)\n\t"
                "lh\t%3,10(%1)\n\t"
                "sh\t%2,8(%0)\n\t"
                "lh\t%4,12(%1)\n\t"
                "sh\t%3,10(%0)\n\t"
                "lh\t%5,14(%1)\n\t"
                "sh\t%4,12(%0)\n\t"
                "nop\n\t"
                "sh\t%5,14(%0)\n\t"
                ".set\tat\n\t"
                ".set\treorder"
                :"=r" (dummy1), "=r" (dummy2),
                 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
                :"0" (to_a), "1" (from_a)
                :"memory");

            to_a   += 8;
            from_a += 8;

        }
        while (to_a < to_end_a) {
            *to_a++ = *from_a++;
        }
    } else {
        /* Different parity */
        from_v = *from_a;
        while (to_a < to_end_a) {
            unsigned short from_v_next;
            from_v_next = *++from_a;
            *to_a++ = ((from_v & 0xff)<<8) | ((from_v_next>>8) & 0xff);
            from_v  = from_v_next;
        }

    }
    if (odd_to_end) {
        /* Last byte unaligned case */
        to_v   = *to_a;
        from_v = *from_a;

        to_v &= ~0xff00;
        if (odd_from == odd_to) {
            to_v |= from_v & 0xff00;
        } else {
            to_v |= (from_v<<8) & 0xff00;
        }

        *to_a = to_v;
    }

    update_lance_stat( len );

    return( dst );
}
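
/* Shape of a slow_memcpy() transfer (illustrative only): a leading odd
 * destination byte is merged into its containing half-word with a 16-bit
 * read-modify-write, the middle of the buffer is moved as 16-bit half-words
 * (byte-shuffled in the "different parity" branch, and in unrolled batches
 * of eight half-words in the "same parity" branch), and a trailing odd byte
 * is again merged read-modify-write style -- so every access to the VME
 * window is exactly 16 bits wide.
 */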


int __init bagetlance_probe( struct net_device *dev )
{   int i;
    static int found;

    SET_MODULE_OWNER(dev);

    if (found)
        /* Assume there's only one board possible... That seems true, since
         * the Riebl/PAM board's address cannot be changed. */
        return( -ENODEV );

    for( i = 0; i < N_LANCE_ADDR; ++i ) {
        if (lance_probe1( dev, &lance_addr_list[i] )) {
            found = 1;
            return( 0 );
        }
    }

    return( -ENODEV );
}



/* Derived from hwreg_present() in vme/config.c: */

static int __init addr_accessible( volatile void *regp,
                                   int wordflag,
                                   int writeflag )
{
    /* We have a fine function to do it */
    extern int try_read(unsigned long, int);
    return try_read((unsigned long)regp, sizeof(short)) != -1;
}



/* The original Atari driver uses these */
#define IRQ_TYPE_PRIO SA_INTERRUPT
#define IRQ_SOURCE_TO_VECTOR(x) (x)

static int __init lance_probe1( struct net_device *dev,
                                struct lance_addr *init_rec )
{   volatile unsigned short *memaddr =
        (volatile unsigned short *)init_rec->memaddr;
    volatile unsigned short *ioaddr =
        (volatile unsigned short *)init_rec->ioaddr;
    struct lance_private    *lp;
    struct lance_ioreg      *IO;
    int                     i;
    static int              did_version;
    unsigned short          save1, save2;

    PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
                  (long)memaddr, (long)ioaddr ));

    /* Test whether memory is readable and writable */
    PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
    if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;

    if ((unsigned long)memaddr >= KSEG2) {
        extern int kseg2_alloc_io (unsigned long addr, unsigned long size);
        if (kseg2_alloc_io((unsigned long)memaddr, BAGET_LANCE_MEM_SIZE)) {
            printk("bagetlance: unable to map lance memory\n");
            goto probe_fail;
        }
    }

    /* Written values should come back... */
    PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
    save1 = *memaddr;
    *memaddr = 0x0001;
    if (*memaddr != 0x0001) goto probe_fail;
    PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
    *memaddr = 0x0000;
    if (*memaddr != 0x0000) goto probe_fail;
    *memaddr = save1;

    /* First port should be readable and writable */
    PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
    if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;

    /* and written values should be readable */
    PROBE_PRINT(( "lance_probe1: testing ioport to be writable\n" ));
    save2 = ioaddr[1];
    ioaddr[1] = 0x0001;
    if (ioaddr[1] != 0x0001) goto probe_fail;

    /* The CSR0_INIT bit should not be readable */
    PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
    save1 = ioaddr[0];
    ioaddr[1] = CSR0;
    ioaddr[0] = CSR0_INIT | CSR0_STOP;
    if (ioaddr[0] != CSR0_STOP) {
        ioaddr[0] = save1;
        ioaddr[1] = save2;
        goto probe_fail;
    }
    PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
    ioaddr[0] = CSR0_STOP;
    if (ioaddr[0] != CSR0_STOP) {
        ioaddr[0] = save1;
        ioaddr[1] = save2;
        goto probe_fail;
    }

    /* Now ok... */
    PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
    goto probe_ok;

  probe_fail:
    return( 0 );

  probe_ok:
    init_etherdev( dev, sizeof(struct lance_private) );
    if (!dev->priv) {
        dev->priv = kmalloc( sizeof(struct lance_private), GFP_KERNEL );
        if (!dev->priv)
            return 0;
    }
    lp = (struct lance_private *)dev->priv;
    MEM = (struct lance_memory *)memaddr;
    IO = lp->iobase = (struct lance_ioreg *)ioaddr;
    dev->base_addr = (unsigned long)ioaddr; /* informational only */
    lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;

    REGA( CSR0 ) = CSR0_STOP;

    /* Now test for type: If the eeprom I/O port is readable, it is a
     * PAM card */
    if (addr_accessible( &(IO->eeprom), 0, 0 )) {
        /* Switch back to RAM */
        i = IO->mem;
        lp->cardtype = PAM_CARD;
    }
#ifdef NORMAL_MEM_ACCESS
    else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
#else
    else if (({
        unsigned short *a = (unsigned short*)RIEBL_MAGIC_ADDR;
        (((int)a[0]) << 16) + ((int)a[1]) == RIEBL_MAGIC;
    })) {
#endif
        lp->cardtype = NEW_RIEBL;
    }
    else
        lp->cardtype = OLD_RIEBL;

    if (lp->cardtype == PAM_CARD ||
        memaddr == (unsigned short *)0xffe00000) {
        /* PAM cards and Riebl on ST use the level 5 autovector */
        request_irq(BAGET_LANCE_IRQ, lance_interrupt, IRQ_TYPE_PRIO,
                    "PAM/Riebl-ST Ethernet", dev);
        dev->irq = (unsigned short)BAGET_LANCE_IRQ;
    }
    else {
        /* For VME-RieblCards, request a free VME int;
         * (This must be unsigned long, since dev->irq is short and the
         * IRQ_MACHSPEC bit would be cut off...)
         */
        unsigned long irq = BAGET_LANCE_IRQ;
        if (!irq) {
            printk( "Lance: request for VME interrupt failed\n" );
            return( 0 );
        }
        request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
                    "Riebl-VME Ethernet", dev);
        dev->irq = irq;
    }

    printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
           dev->name, lance_names[lp->cardtype],
           (unsigned long)ioaddr,
           (unsigned long)memaddr,
           dev->irq,
           init_rec->slow_flag ? " (slow memcpy)" : "" );

    /* Get the ethernet address */
    switch( lp->cardtype ) {
      case OLD_RIEBL:
        /* No ethernet address! (Set some default address) */
        slow_memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
        break;
      case NEW_RIEBL:
        lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
        break;
      case PAM_CARD:
        i = IO->eeprom;
        for( i = 0; i < 6; ++i )
            dev->dev_addr[i] =
                ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
                ((((unsigned short *)MEM)[i*2+1] & 0x0f));
        i = IO->mem;
        break;
    }
    for( i = 0; i < 6; ++i )
        printk( "%02x%s", dev->dev_addr[i], (i < 5) ? ":" : "\n" );
    if (lp->cardtype == OLD_RIEBL) {
        printk( "%s: Warning: This is a default ethernet address!\n",
                dev->name );
        printk( "      Use \"ifconfig hw ether ...\" to set the address.\n" );
    }

    MEM->init.mode = 0x0000;        /* Disable Rx and Tx. */

    {
        unsigned char hwaddr[6];
        for( i = 0; i < 6; i++ )
            hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
        slow_memcpy(MEM->init.hwaddr, hwaddr, sizeof(hwaddr));
    }
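
    /* The i^1 swap above stores the address pairwise byte-swapped, e.g.
     * 00:11:22:33:44:55 goes into the init block as 11:00:33:22:55:44;
     * presumably this compensates for the 16-bit byte ordering selected
     * with CSR3_BSWP in lance_open()/set_multicast_list().
     */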

    MEM->init.filter[0] = 0x00000000;
    MEM->init.filter[1] = 0x00000000;
    MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );

#ifdef NORMAL_MEM_ACCESS
    MEM->init.rx_ring.adr_hi = LANCE_HI_BASE;
    MEM->init.rx_ring.len    = RX_RING_LEN_BITS;
#else
    MEM->init.rx_ring.len_adr_hi =
        ((unsigned)RX_RING_LEN_BITS << 8) | LANCE_HI_BASE;
#endif


    MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );

#ifdef NORMAL_MEM_ACCESS
    MEM->init.tx_ring.adr_hi = LANCE_HI_BASE;
    MEM->init.tx_ring.len    = TX_RING_LEN_BITS;
#else
    MEM->init.tx_ring.len_adr_hi =
        ((unsigned)TX_RING_LEN_BITS<<8) | LANCE_HI_BASE;
#endif

    if (lp->cardtype == PAM_CARD)
        IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
    else
        *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);

    if (did_version++ == 0)
        DPRINTK( 1, ( version ));

    /* The LANCE-specific entries in the device structure. */
    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
    dev->set_multicast_list = &set_multicast_list;
    dev->set_mac_address = &lance_set_mac_address;
    dev->start = 0;

    memset( &lp->stats, 0, sizeof(lp->stats) );

    return( 1 );
}


static int lance_open( struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    struct lance_ioreg   *IO = lp->iobase;
    int i;

    DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));

    lance_init_ring(dev);
    /* Re-initialize the LANCE, and start it when done. */

    REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
    REGA( CSR2 ) = 0;
    REGA( CSR1 ) = 0;
    REGA( CSR0 ) = CSR0_INIT;
    /* From now on, AREG is kept to point to CSR0 */

    i = 1000000;
    while (--i > 0)
        if (DREG & CSR0_IDON)
            break;
    if (i <= 0 || (DREG & CSR0_ERR)) {  /* i == 0 means the IDON wait timed out */
        DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
                      dev->name, i, DREG ));
        DREG = CSR0_STOP;
        return( -EIO );
    }
    DREG = CSR0_IDON;
    DREG = CSR0_STRT;
    DREG = CSR0_INEA;

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;

    DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
    return( 0 );
}


/* Initialize the LANCE Rx and Tx rings. */

static void lance_init_ring( struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;
    unsigned offset;

    lp->lock = 0;
    lp->tx_full = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_tx = 0;

    offset = offsetof( struct lance_memory, packet_area );

/* If the packet buffer at offset 'o' would conflict with the reserved area
 * of RieblCards, advance it */
#define CHECK_OFFSET(o)                                                      \
    do {                                                                     \
        if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) {        \
            if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
                                         : (o) < RIEBL_RSVD_END)             \
                (o) = RIEBL_RSVD_END;                                        \
        }                                                                    \
    } while(0)
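
/* For instance, with PKT_BUF_SZ == 1544 (0x608) a buffer that would start at
 * the (hypothetical) offset 0xe900 would end at 0xef08 and overlap the
 * reserved area beginning at 0xee70, so CHECK_OFFSET() moves it up to
 * RIEBL_RSVD_END (0xeec0); the adjustment is only applied for the two Riebl
 * card types.
 */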

    for( i = 0; i < TX_RING_SIZE; i++ ) {
        CHECK_OFFSET(offset);
        MEM->tx_head[i].base = offset;
#ifdef NORMAL_MEM_ACCESS
        MEM->tx_head[i].flag = TMD1_OWN_HOST;
        MEM->tx_head[i].base_hi = LANCE_HI_BASE;
#else
        MEM->tx_head[i].flag_base_hi =
            (TMD1_OWN_HOST<<8) | LANCE_HI_BASE;
#endif
        MEM->tx_head[i].length = 0;
        MEM->tx_head[i].misc = 0;
        offset += PKT_BUF_SZ;
    }

    for( i = 0; i < RX_RING_SIZE; i++ ) {
        CHECK_OFFSET(offset);
        MEM->rx_head[i].base = offset;
#ifdef NORMAL_MEM_ACCESS
        MEM->rx_head[i].flag = TMD1_OWN_CHIP;
        MEM->rx_head[i].base_hi = LANCE_HI_BASE;
#else
        MEM->rx_head[i].flag_base_hi =
            (TMD1_OWN_CHIP<<8) | LANCE_HI_BASE;
#endif
        MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
        MEM->rx_head[i].msg_length = 0;
        offset += PKT_BUF_SZ;
    }
}


static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    struct lance_ioreg   *IO = lp->iobase;
    int entry, len;
    struct lance_tx_head *head;
    unsigned long flags;

    /* The old LANCE chips don't automatically pad buffers to the min. size. */
    len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
    /* PAM-Card has a bug: it can only send packets with an even number of
     * bytes! */
    if (lp->cardtype == PAM_CARD && (len & 1))
        ++len;

    if (len > skb->len)
    {
        skb = skb_padto(skb, len);
        if(skb == NULL)
            return 0;
    }

    /* Transmitter timeout, serious problems. */
    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 20)
            return( 1 );
        AREG = CSR0;
        DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
                      dev->name, DREG ));
        DREG = CSR0_STOP;
        /*
         * Always set BSWP after a STOP as STOP puts it back into
         * little endian mode.
         */
        REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
        lp->stats.tx_errors++;
#ifndef final_version
        {   int i;
            DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
                          lp->dirty_tx, lp->cur_tx,
                          lp->tx_full ? " (full)" : "",
                          lp->cur_rx ));
            for( i = 0 ; i < RX_RING_SIZE; i++ )
                DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
                              i, MEM->rx_head[i].base,
                              -MEM->rx_head[i].buf_length,
                              MEM->rx_head[i].msg_length ));
            for( i = 0 ; i < TX_RING_SIZE; i++ )
                DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
                              i, MEM->tx_head[i].base,
                              -MEM->tx_head[i].length,
                              MEM->tx_head[i].misc ));
        }
#endif
        lance_init_ring(dev);
        REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;

        dev->tbusy = 0;
        dev->trans_start = jiffies;

        return( 0 );
    }

    DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
                  dev->name, DREG ));

    /* Block a timer-based transmit from overlapping. This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (test_and_set_bit( 0, (void*)&dev->tbusy ) != 0) {
        DPRINTK( 0, ( "%s: Transmitter access conflict.\n", dev->name ));
        return 1;
    }

    if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
        DPRINTK( 0, ( "%s: tx queue lock!.\n", dev->name ));
        /* don't clear dev->tbusy flag. */
        return 1;
    }

    /* Fill in a Tx ring entry */
    if (lance_debug >= 3) {
        u_char *p;
        int i;
        printk( "%s: TX pkt type 0x%04x from ", dev->name,
                ((u_short *)skb->data)[6]);
        for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ )
            printk("%02x%s", *p++, i != 5 ? ":" : "" );
        printk(" to ");
        for( p = (u_char *)skb->data, i = 0; i < 6; i++ )
            printk("%02x%s", *p++, i != 5 ? ":" : "" );
        printk(" data at 0x%08x len %d\n", (int)skb->data,
               (int)skb->len );
    }

    /* We're not prepared for the int until the last flags are set/reset.
     * And the int may already happen after setting the OWN_CHIP... */
    save_flags(flags);
    cli();

    /* Mask to ring buffer boundary. */
    entry = lp->cur_tx & TX_RING_MOD_MASK;
    head  = &(MEM->tx_head[entry]);

    /* Caution: the write order is important here, set the "ownership" bits
     * last.
     */
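
    /* Because the chip may start DMA for this descriptor as soon as the OWN
     * bit becomes visible, length, misc and the packet data are written
     * first below; only the final SET_FLAG()/flag store hands the buffer
     * over to the LANCE.
     */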

    head->length = -len;
    head->misc = 0;
    lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
#ifdef NORMAL_MEM_ACCESS
    head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
#else
    SET_FLAG(head,(TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP));
#endif
    lp->stats.tx_bytes += skb->len;
    dev_kfree_skb( skb );
    lp->cur_tx++;
    while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
        lp->cur_tx   -= TX_RING_SIZE;
        lp->dirty_tx -= TX_RING_SIZE;
    }

    /* Trigger an immediate send poll. */
    DREG = CSR0_INEA | CSR0_TDMD;
    dev->trans_start = jiffies;

    lp->lock = 0;
#ifdef NORMAL_MEM_ACCESS
    if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
#else
    if ((GET_FLAG(&MEM->tx_head[(entry+1) & TX_RING_MOD_MASK]) & TMD1_OWN) ==
#endif
        TMD1_OWN_HOST)
        dev->tbusy = 0;
    else
        lp->tx_full = 1;
    restore_flags(flags);

    return 0;
}

/* The LANCE interrupt handler. */

static void lance_interrupt( int irq, void *dev_id, struct pt_regs *fp)
{
    struct net_device    *dev = dev_id;
    struct lance_private *lp;
    struct lance_ioreg   *IO;
    int csr0, boguscnt = 10;

    if (dev == NULL) {
        DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
        return;
    }

    lp = (struct lance_private *)dev->priv;
    IO = lp->iobase;
    AREG = CSR0;

    if (dev->interrupt) {
        DPRINTK( 1, ( "Re-entering CAUSE=%08x STATUS=%08x\n",
                      read_32bit_cp0_register(CP0_CAUSE),
                      read_32bit_cp0_register(CP0_STATUS) ));
        panic("lance: interrupt handler reentered !");
    }

    dev->interrupt = 1;

    while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
           --boguscnt >= 0) {
        /* Acknowledge all of the current interrupt sources ASAP. */
        DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
                        CSR0_TDMD | CSR0_INEA);

        DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
                      dev->name, csr0, DREG ));

        if (csr0 & CSR0_RINT)           /* Rx interrupt */
            lance_rx( dev );

        if (csr0 & CSR0_TINT) {         /* Tx-done interrupt */
            int dirty_tx = lp->dirty_tx;

            while( dirty_tx < lp->cur_tx) {
                int entry = dirty_tx & TX_RING_MOD_MASK;
#ifdef NORMAL_MEM_ACCESS
                int status = MEM->tx_head[entry].flag;
#else
                int status = GET_FLAG(&MEM->tx_head[entry]);
#endif
                if (status & TMD1_OWN_CHIP)
                    break;          /* It still hasn't been Txed */

#ifdef NORMAL_MEM_ACCESS
                MEM->tx_head[entry].flag = 0;
#else
                SET_FLAG(&MEM->tx_head[entry],0);
#endif

                if (status & TMD1_ERR) {
                    /* There was a major error, log it. */
                    int err_status = MEM->tx_head[entry].misc;
                    lp->stats.tx_errors++;
                    if (err_status & TMD3_RTRY) lp->stats.tx_aborted_errors++;
                    if (err_status & TMD3_LCAR) lp->stats.tx_carrier_errors++;
                    if (err_status & TMD3_LCOL) lp->stats.tx_window_errors++;
                    if (err_status & TMD3_UFLO) {
                        /* Ackk! On FIFO errors the Tx unit is turned off! */
                        lp->stats.tx_fifo_errors++;
                        /* Remove this verbosity later! */
                        DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
                                      dev->name, csr0 ));
                        /* Restart the chip. */
                        DREG = CSR0_STRT;
                    }
                } else {
                    if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
                        lp->stats.collisions++;
                    lp->stats.tx_packets++;
                }
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                DPRINTK( 0, ( "out-of-sync dirty pointer,"
                              " %d vs. %d, full=%d.\n",
                              dirty_tx, lp->cur_tx, lp->tx_full ));
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full && dev->tbusy
                && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                /* The ring is no longer full, clear tbusy. */
                lp->tx_full = 0;
                dev->tbusy = 0;
                mark_bh( NET_BH );
            }

            lp->dirty_tx = dirty_tx;
        }

        /* Log misc errors. */
        if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */
        if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */
        if (csr0 & CSR0_MERR) {
            DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
                          "status %04x.\n", dev->name, csr0 ));
            /* Restart the chip. */
            DREG = CSR0_STRT;
        }
    }

    /* Clear any other interrupt, and set interrupt enable. */
    DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
           CSR0_IDON | CSR0_INEA;

    DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
                  dev->name, DREG ));
    dev->interrupt = 0;
    return;
}


static int lance_rx( struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;
    int i;

#ifdef NORMAL_MEM_ACCESS
    DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
                  MEM->rx_head[entry].flag ));
#else
    DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
                  GET_FLAG(&MEM->rx_head[entry]) ));
#endif

    /* If we own the next entry, it's a new packet. Send it up. */
#ifdef NORMAL_MEM_ACCESS
    while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
#else
    while( (GET_FLAG(&MEM->rx_head[entry]) & RMD1_OWN) == RMD1_OWN_HOST ) {
#endif
        struct lance_rx_head *head = &(MEM->rx_head[entry]);
#ifdef NORMAL_MEM_ACCESS
        int status = head->flag;
#else
        int status = GET_FLAG(head);
#endif

        if (status != (RMD1_ENP|RMD1_STP)) {        /* There was an error. */
            /* There is a tricky error noted by John Murphy,
               <murf@perftech.com> to Russ Nelson: Even with full-sized
               buffers it's possible for a jabber packet to use two
               buffers, with only the last correctly noting the error. */
            if (status & RMD1_ENP)      /* Only count a general error at the */
                lp->stats.rx_errors++;  /* end of a packet. */
            if (status & RMD1_FRAM) lp->stats.rx_frame_errors++;
            if (status & RMD1_OFLO) lp->stats.rx_over_errors++;
            if (status & RMD1_CRC)  lp->stats.rx_crc_errors++;
            if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++;
#ifdef NORMAL_MEM_ACCESS
            head->flag &= (RMD1_ENP|RMD1_STP);
#else
            SET_FLAG(head,GET_FLAG(head) & (RMD1_ENP|RMD1_STP));
#endif
        } else {
            /* Malloc up new buffer, compatible with net-3. */
            short pkt_len = head->msg_length & 0xfff;
            struct sk_buff *skb;

            if (pkt_len < 60) {
                printk( "%s: Runt packet!\n", dev->name );
                lp->stats.rx_errors++;
            }
            else {
                skb = dev_alloc_skb( pkt_len+2 );
                if (skb == NULL) {
                    DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
                                  dev->name ));
                    for( i = 0; i < RX_RING_SIZE; i++ )
#ifdef NORMAL_MEM_ACCESS
                        if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
#else
                        if (GET_FLAG(&MEM->rx_head[(entry+i) &
                                                   RX_RING_MOD_MASK]) &
#endif
                            RMD1_OWN_CHIP)
                            break;

                    if (i > RX_RING_SIZE - 2) {
                        lp->stats.rx_dropped++;
#ifdef NORMAL_MEM_ACCESS
                        head->flag |= RMD1_OWN_CHIP;
#else
                        SET_FLAG(head,GET_FLAG(head) | RMD1_OWN_CHIP);
#endif
                        lp->cur_rx++;
                    }
                    break;
                }

                if (lance_debug >= 3) {
                    u_char *data = PKTBUF_ADDR(head), *p;
                    printk( "%s: RX pkt type 0x%04x from ", dev->name,
                            ((u_short *)data)[6]);
                    for( p = &data[6], i = 0; i < 6; i++ )
                        printk("%02x%s", *p++, i != 5 ? ":" : "" );
                    printk(" to ");
                    for( p = data, i = 0; i < 6; i++ )
                        printk("%02x%s", *p++, i != 5 ? ":" : "" );
                    printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
                           "len %d\n",
                           data[15], data[16], data[17], data[18],
                           data[19], data[20], data[21], data[22],
                           pkt_len );
                }

                skb->dev = dev;
                skb_reserve( skb, 2 );      /* 16 byte align */
                skb_put( skb, pkt_len );    /* Make room */
                lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
                skb->protocol = eth_type_trans( skb, dev );
                netif_rx( skb );
                dev->last_rx = jiffies;
                lp->stats.rx_packets++;
                lp->stats.rx_bytes += pkt_len;
            }
        }

#ifdef NORMAL_MEM_ACCESS
        head->flag |= RMD1_OWN_CHIP;
#else
        SET_FLAG(head,GET_FLAG(head) | RMD1_OWN_CHIP);
#endif
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }
    lp->cur_rx &= RX_RING_MOD_MASK;

    /* From lance.c (Donald Becker): */
    /* We should check that at least two ring entries are free. If not,
       we should free one and mark stats->rx_dropped++. */

    return 0;
}


static int lance_close( struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    struct lance_ioreg   *IO = lp->iobase;

    dev->start = 0;
    dev->tbusy = 1;

    AREG = CSR0;

    DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
                  dev->name, DREG ));

    /* We stop the LANCE here -- it occasionally polls
       memory if we don't. */
    DREG = CSR0_STOP;

    return 0;
}


static struct net_device_stats *lance_get_stats( struct net_device *dev )
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    return &lp->stats;
}


/* Set or clear the multicast filter for this adaptor.
   num_addrs == -1      Promiscuous mode, receive all packets
   num_addrs == 0       Normal mode, clear multicast list
   num_addrs > 0        Multicast mode, receive normal and MC packets, and do
                        best-effort filtering.
 */

static void set_multicast_list( struct net_device *dev )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    struct lance_ioreg   *IO = lp->iobase;

    if (!dev->start)
        /* Only possible if board is already started */
        return;

    /* We take the simple way out and always enable promiscuous mode. */
    DREG = CSR0_STOP; /* Temporarily stop the lance. */

    if (dev->flags & IFF_PROMISC) {
        /* Log any net taps. */
        DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
        REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
    } else {
        short multicast_table[4];
        int num_addrs = dev->mc_count;
        int i;
        /* We don't use the multicast table, but rely on upper-layer
         * filtering. */
        memset( multicast_table, (num_addrs == 0) ? 0 : -1,
                sizeof(multicast_table) );
        for( i = 0; i < 4; i++ )
            REGA( CSR8+i ) = multicast_table[i];
        REGA( CSR15 ) = 0; /* Unset promiscuous mode */
    }

    /*
     * Always set BSWP after a STOP as STOP puts it back into
     * little endian mode.
     */
    REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);

    /* Resume normal operation and reset AREG to CSR0 */
    REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
}


/* This is needed for old RieblCards and possibly for new RieblCards */

static int lance_set_mac_address( struct net_device *dev, void *addr )
{   struct lance_private *lp = (struct lance_private *)dev->priv;
    struct sockaddr *saddr = addr;
    int i;

    if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
        return( -EOPNOTSUPP );

    if (dev->start) {
        /* Only possible while card isn't started */
        DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
                      dev->name ));
        return( -EIO );
    }

    slow_memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );

    {
        unsigned char hwaddr[6];
        for( i = 0; i < 6; i++ )
            hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
        slow_memcpy(MEM->init.hwaddr, hwaddr, sizeof(hwaddr));
    }

    lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
    /* also set the magic for future sessions */
#ifdef NORMAL_MEM_ACCESS
    *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
#else
    {
        unsigned long magic = RIEBL_MAGIC;
        slow_memcpy(RIEBL_MAGIC_ADDR, &magic, sizeof(*RIEBL_MAGIC_ADDR));
    }
#endif
    return( 0 );
}


#ifdef MODULE
static struct net_device bagetlance_dev;

int init_module(void)
{   int err;

    bagetlance_dev.init = bagetlance_probe;
    if ((err = register_netdev( &bagetlance_dev ))) {
        if (err == -EIO) {
            printk( "No VME Lance board found. Module not loaded.\n");
        }
        return( err );
    }
    return( 0 );
}

void cleanup_module(void)
{
    unregister_netdev( &bagetlance_dev );
}

#endif /* MODULE */

/*
 * Local variables:
 * c-indent-level: 4
 * tab-width: 4
 * End:
 */
