/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *                                            Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*#define DEBUG*/
/*#define TEST_HITS*/

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/* Transmit/Receive Ring Definitions */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE            PKT_BUF_SIZE
#define TX_BUFF_SIZE            PKT_BUF_SIZE

/* Layout of the Lance's RAM Buffer */

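/*
 * The init block, the descriptor rings and the packet buffers below are
 * all laid out in the A2065's on-board RAM: the CPU reaches that RAM
 * through the Zorro II mapping at dev->mem_start (lp->init_block), while
 * the LANCE uses its own view of the same memory (lp->lance_init_block),
 * so no separate DMA buffers are needed.
 */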
struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];     /* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};

/* Private Device Data */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	    /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* LANCE's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;		      /* cable-selection is TPE */
	int auto_select;	      /* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
	int burst_sizes;	      /* ledma SBus burst sizes */
#endif
	struct timer_list         multicast_timer;
};

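/*
 * The LANCE only drives 24 address lines, so everything handed to the
 * chip (the init block pointer in CSR1/CSR2 and the ring/buffer
 * addresses in the descriptors) is truncated to the low 24 bits here.
 */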
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)

/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr = LANCE_ADDR(aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}

/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib = lp->lance_init_block;
					/* for LANCE_ADDR computations */
	int leptr;
	int i;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];

	/* Setup the Tx ring entries */
	netdev_dbg(dev, "TX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	netdev_dbg(dev, "RX rings:\n");
	for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (i < 3)
			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
	}

	/* Setup the initialization block */

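	/* Each ring pointer is split across two init block words: the low
	 * 16 address bits go in {rx,tx}_ptr, while {rx,tx}_len packs
	 * log2(number of entries) into bits 15..13 and the high byte of
	 * the 24-bit ring address into bits 7..0, as the Am7990 expects.
	 */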
	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	netdev_dbg(dev, "RX ptr: %08x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	netdev_dbg(dev, "TX ptr: %08x\n", leptr);

	/* Clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
}

static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}

static int lance_rx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	char buf[RX_RING_SIZE + 1];

	for (i = 0; i < RX_RING_SIZE; i++) {
		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
		if (i == lp->rx_new)
			buf[i] = r1_own ? '_' : 'X';
		else
			buf[i] = r1_own ? '.' : '1';
	}
	buf[RX_RING_SIZE] = 0;

	pr_debug("RxRing TestHits: [%s]\n", buf);
#endif

	ll->rdp = LE_C0_RINT | LE_C0_INEA;
	for (rd = &ib->brx_ring[lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring[lp->rx_new]) {

		/* We got an incomplete frame?  Count it and fall through
		 * so the descriptor is still handed back to the chip.
		 */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
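			/* The message byte count reported by the LANCE
			 * includes the 4-byte FCS, which is not passed up
			 * the stack, hence the "- 4" below.
			 */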
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);

			if (!skb) {
				netdev_warn(dev, "Memory squeeze, deferring packet\n");
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve(skb, 2);		/* 16 byte align */
			skb_put(skb, len);		/* make room */
			skb_copy_to_linear_data(skb,
				 (unsigned char *)&ib->rx_buf[lp->rx_new][0],
				 len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}

static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}

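/* Free Tx descriptors between the next slot to fill (tx_new) and the
 * next one to reclaim (tx_old).  One entry is always left unused so a
 * completely full ring can be told apart from an empty one.
 */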
static int lance_tx_buffs_avail(struct lance_private *lp)
{
	if (lp->tx_old <= lp->tx_new)
		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
	return lp->tx_old - lp->tx_new - 1;
}

static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

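	/* Writing a 1 back to a CSR0 status bit clears it; the command
	 * bits (INIT, STRT, STOP, TDMD) and INEA are masked from the
	 * value written back so the acknowledge doesn't issue commands.
	 */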
	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;       /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;       /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}

static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}

static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}

static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

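	/* Runts must be padded up to the Ethernet minimum (ETH_ZLEN, 60
	 * bytes excluding FCS) before being copied into the Tx buffer.
	 */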
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!lance_tx_buffs_avail(lp)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

#ifdef DEBUG
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
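	/* The LANCE wants the buffer byte count as a two's complement
	 * (negative) value, with the upper four bits of tmd2 set to ones.
	 */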
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}

/* taken from the depca driver */
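/* The chip hashes each multicast address with the Ethernet CRC-32 and
 * uses the six most significant bits of the result to pick one of the
 * 64 bits in the logical address filter; the loop below mirrors that
 * mapping when building the filter words.
 */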
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}

static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}

static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent);
static void __devexit a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= __devexit_p(a2065_remove_one),
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + A2065_LANCE;
	unsigned long mem_start = board + A2065_RAM;
	struct resource *r1, *r2;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

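	/* The Ethernet address is formed from the manufacturer's OUI
	 * (Commodore 00:80:10 or Ameristar 00:00:9f) followed by the low
	 * 24 bits of the board's Zorro expansion ROM serial number.
	 */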
	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
	dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
	dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
	dev->base_addr = ZTWO_VADDR(base_addr);
	dev->mem_start = ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long) dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long))lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct lance_regs));
		release_mem_region(mem_start, A2065_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}

static void __devexit a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");