/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
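
/* Example usage, as found throughout this file:
 *	DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
 * Note the macro expands to a bare 'if' statement, so take care when
 * placing it inside an if/else of its own.
 */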


#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
#define ENABLE_MVME16x_NET
#endif
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
#define ENABLE_APRICOT
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)     ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x)     ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x)    ((struct i596_iscp *)((long)x))
#define WSWAPscb(x)     ((struct i596_scb *)((long)x))
#define WSWAPcmd(x)     ((struct i596_cmd *)((long)x))
#define WSWAPtbd(x)     ((struct i596_tbd *)((long)x))
#define WSWAPchar(x)    ((char *)((long)x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif
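
/* For example, on 680x0 WSWAPcmd(0x12345678) yields 0x56781234 - the
 * halfword-swapped form the i596 expects for 32-bit bus addresses in
 * 'big-endian' mode, as described in the comment block at the top of
 * this file.
 */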

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
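
/* MPU_PORT() below forms the port command by ORing one of the above
 * codes into the bus address of the operand, e.g.
 *	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
 * as done in init_i596_mem().
 */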

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");

module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set once the command has executed */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define	 CUC_START	0x0100
#define	 CUC_RESUME	0x0200
#define	 CUC_SUSPEND    0x0300
#define	 CUC_ABORT	0x0400
#define	 RX_START	0x0010
#define	 RX_RESUME	0x0020
#define	 RX_SUSPEND	0x0030
#define	 RX_ABORT	0x0040

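/* Watchdog timeout for the tx path: HZ/20 jiffies, i.e. nominally 50 ms. */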
#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};
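
/* i596_add_cmd() maintains the invariant described above with
 *	lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
 * i.e. the chip-visible link points at the status field, skipping
 * v_next entirely.
 */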

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
    unsigned short count;
    unsigned short zero1;
    struct i596_rbd *b_next;
    unsigned char *b_data;		/* Address from i596 viewpoint */
    unsigned short size;
    unsigned short zero2;
    struct sk_buff *skb;
    struct i596_rbd *v_next;
    struct i596_rbd *b_addr;		/* This rbd addr from i596 view */
    unsigned char *v_data;		/* Address from CPUs viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};
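
/* The chip locates its data structures through a chain set up in
 * init_i596_mem():  SCP -> ISCP -> SCB -> command list and RFD list.
 * The SCP address itself is handed to the chip with PORT_ALTSCP on
 * the VME boards, or through the io ports on the Apricot.
 */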

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};
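
/* i82596_probe() places this structure in a single 4096-byte page
 * (marked non-cached on m68k), so it is assumed to fit in one page.
 */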

static char init_setup[] =
{
	0x8E,			/* length, prefetch on */
	0xC8,			/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,			/* don't save bad frames */
#else
	0x80,			/* don't save bad frames */
#endif
	0x2E,			/* No source address insertion, 8 byte preamble */
	0x00,			/* priority and backoff defaults */
	0x60,			/* interframe spacing */
	0x00,			/* slot time LSB */
	0xf2,			/* slot time and retries */
	0x00,			/* promiscuous mode */
	0x00,			/* collision detect */
	0x40,			/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
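
/* init_setup is the parameter block for CmdConfigure; its first 14
 * bytes are copied into cf_cmd.i596_config by init_i596_mem(), and
 * set_multicast_list() later patches the promiscuous bit (byte 8)
 * and the multicast-disable bit (byte 11) in that copy.
 */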

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;


static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT) {
		outw(0, (short) (dev->base_addr) + 4);
	}
#endif
}


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}
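
/* lp->iscp.stat is set to ISCP_BUSY before channel attention and is
 * cleared by the chip once it has read the ISCP, so wait_istat()
 * effectively polls for completion of initialisation; see
 * init_i596_mem().
 */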


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
		lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
               " over %lx, rcvdt %lx, short %lx\n",
		lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
		lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		  cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
                        " count %04x\n",
			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
			rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
		rbd->skb = NULL;
	}
}

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

		if (skb == NULL) {
			remove_rx_bufs(dev);
			return -ENOMEM;
		}

		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	return 0;
}
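
/* After init_rx_bufs() the RFDs and RBDs each form a circular list;
 * only the first RFD points at an RBD, and the last RFD carries
 * CMD_EOL so the chip suspends rather than wrapping onto frames the
 * CPU has not processed yet.
 */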


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
	short ioaddr = dev->base_addr;
#endif
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#elif defined(ENABLE_APRICOT)

	{
		u32 scp = virt_to_bus(&lp->scp);

		/* change the scp address */
		outw(0, ioaddr);
		outw(0, ioaddr);
		outb(4, ioaddr + 0xf);
		outw(scp | 2, ioaddr);
		outw(scp >> 16, ioaddr);
	}
#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT)
		lp->scp.sysbus = 0x00440000;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

#if defined(ENABLE_APRICOT)
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

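/* Receive path: frames longer than rx_copybreak are passed up in the
 * skb already on the ring (a freshly allocated full-size skb replaces
 * it); shorter frames are copied into a right-sized skb so the
 * full-size buffer stays on the ring.
 */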
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				dev->stats.rx_dropped++;
			}
			else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->protocol=eth_type_trans(skb,dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}
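
/* The reset above fires only when the backlog exceeds max_cmd_backlog
 * (TX_RING_SIZE - 1) and no command has completed for ticks_limit
 * (25) jiffies; lp->last_cmd is refreshed by the interrupt handler
 * each time a command finishes.
 */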

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
			res = -EAGAIN;
			goto err_irq_dev;
		}
	}
#endif
	res = init_rx_bufs(dev);
	if (res)
		goto err_irq_56;

	netif_start_queue(dev);

	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		goto err_queue;
	}

	return 0;

err_queue:
	netif_stop_queue(dev);
	remove_rx_bufs(dev);
err_irq_56:
#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
err_irq_dev:
#endif
	free_irq(dev->irq, dev);

	return res;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}

static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}
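
/* Typical output (illustrative values):
 *	i596 0x..., 00:80:ad:01:02:03 --> ff:ff:ff:ff:ff:ff 0806, tx-queued
 * i.e. buffer address, source, destination, ethertype, caller's tag.
 */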

static int io = 0x300;
static int irq = 10;

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open 		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

struct net_device * __init i82596_probe(int unit)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->base_addr = io;
		dev->irq = irq;
	}

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_APRICOT
	{
		int checksum = 0;
		int ioaddr = 0x300;

		/* this is easy, the ethernet interface can only be at 0x300 */
		/* first check nothing is already registered here */

		if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
			printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
			err = -EBUSY;
			goto out;
		}

		dev->base_addr = ioaddr;

		for (i = 0; i < 8; i++) {
			eth_addr[i] = inb(ioaddr + 8 + i);
			checksum += eth_addr[i];
		}

		/* The checksum is a multiple of 0x100; got this wrong first
		   time: some machines have 0x100, some 0x200.  The DOS driver
		   doesn't even bother with the checksum.
		   Some other boards trip the checksum... but then appear as
		   ether address 0.  Trap these - AC */

		if ((checksum % 0x100) ||
		    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
			err = -ENODEV;
			goto out1;
		}

		dev->irq = 10;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev->mem_start));
out1:
#ifdef ENABLE_APRICOT
	release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;
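
	/* SCB status bits being acknowledged: 0x8000 = command done,
	 * 0x4000 = frame received, 0x2000 = command unit left the
	 * active state, 0x1000 = receive unit left the ready state
	 * (CX/FR/CNA/RNR in Intel's naming).
	 */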

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
#ifdef ENABLE_APRICOT
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
#endif
	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, netdev_mc_count(dev),
		dev->flags & IFF_PROMISC  ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
		return;

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		lp->cf_cmd.cmd.command = CmdConfigure;
		i596_add_cmd(dev, &lp->cf_cmd.cmd);
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT)
	{
		cnt = MAX_MC_CNT;
		printk(KERN_ERR "%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
			return;
		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = cnt * ETH_ALEN;
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
						dev->name, cp));
			cp += ETH_ALEN;
		}
		i596_add_cmd(dev, &cmd->cmd);
	}
}

#ifdef MODULE
static struct net_device *dev_82596;

#ifdef ENABLE_APRICOT
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "Apricot IRQ number");
#endif

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

int __init init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe(-1);
	if (IS_ERR(dev_82596))
		return PTR_ERR(dev_82596);
	return 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev_82596->mem_start));
#ifdef ENABLE_APRICOT
	/* If we don't do this, we can't re-insmod it later. */
	release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
#endif
	free_netdev(dev_82596);
}

#endif				/* MODULE */