1 /*
2  *
3  * Alchemy Au1x00 ethernet driver
4  *
5  * Copyright 2001,2002,2003 MontaVista Software Inc.
6  * Copyright 2002 TimeSys Corp.
7  * Author: MontaVista Software, Inc.
8  *         	ppopov@mvista.com or source@mvista.com
9  *
10  * ########################################################################
11  *
12  *  This program is free software; you can distribute it and/or modify it
13  *  under the terms of the GNU General Public License (Version 2) as
14  *  published by the Free Software Foundation.
15  *
16  *  This program is distributed in the hope it will be useful, but WITHOUT
17  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19  *  for more details.
20  *
21  *  You should have received a copy of the GNU General Public License along
22  *  with this program; if not, write to the Free Software Foundation, Inc.,
23  *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
24  *
25  * ########################################################################
26  *
27  *
28  */
29 
30 #ifndef __mips__
31 #error This driver only works with MIPS architectures!
32 #endif
33 
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/string.h>
39 #include <linux/timer.h>
40 #include <linux/errno.h>
41 #include <linux/in.h>
42 #include <linux/ioport.h>
43 #include <linux/slab.h>
44 #include <linux/interrupt.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50 #include <linux/delay.h>
51 #include <asm/mipsregs.h>
52 #include <asm/irq.h>
53 #include <asm/bitops.h>
54 #include <asm/io.h>
55 #include <asm/processor.h>
56 
57 #include <asm/au1000.h>
58 #include <asm/cpu.h>
59 #include "au1000_eth.h"
60 
61 
62 #ifdef AU1000_ETH_DEBUG
63 static int au1000_debug = 5;
64 #else
65 static int au1000_debug = 3;
66 #endif
67 
68 MODULE_LICENSE("GPL");
69 
70 // prototypes
71 static void *dma_alloc(size_t, dma_addr_t *);
72 static void dma_free(void *, size_t);
73 static void hard_stop(struct net_device *);
74 static void enable_rx_tx(struct net_device *dev);
75 static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
76 static int au1000_init(struct net_device *);
77 static int au1000_open(struct net_device *);
78 static int au1000_close(struct net_device *);
79 static int au1000_tx(struct sk_buff *, struct net_device *);
80 static int au1000_rx(struct net_device *);
81 static void au1000_interrupt(int, void *, struct pt_regs *);
82 static void au1000_tx_timeout(struct net_device *);
83 static int au1000_set_config(struct net_device *dev, struct ifmap *map);
84 static void set_rx_mode(struct net_device *);
85 static struct net_device_stats *au1000_get_stats(struct net_device *);
86 static void au1000_timer(unsigned long);
87 static int au1000_ioctl(struct net_device *, struct ifreq *, int);
88 static int mdio_read(struct net_device *, int, int);
89 static void mdio_write(struct net_device *, int, int, u16);
90 static void dump_mii(struct net_device *dev, int phy_id);
91 
92 // externs
93 extern  void ack_rise_edge_irq(unsigned int);
94 extern int get_ethernet_addr(char *ethernet_addr);
95 extern inline void str2eaddr(unsigned char *ea, unsigned char *str);
96 extern inline unsigned char str2hexnum(unsigned char c);
97 extern char * __init prom_getcmdline(void);
98 
99 /*
100  * Theory of operation
101  *
102  * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
103  * There are four receive and four transmit descriptors.  These
104  * descriptors are not in memory; rather, they are just a set of
105  * hardware registers.
106  *
107  * Since the Au1000 has a coherent data cache, the receive and
108  * transmit buffers are allocated from the KSEG0 segment. The
109  * hardware registers, however, are still mapped at KSEG1 to
110  * make sure there are no out-of-order writes, and that all writes
111  * complete immediately.
112  */
113 
114 
115 static char version[] __devinitdata =
116     "au1000eth.c:1.4 ppopov@mvista.com\n";
117 
118 /* These addresses are only used if yamon doesn't tell us what
119  * the mac address is, and the mac address is not passed on the
120  * command line.
121  */
122 static unsigned char au1000_mac_addr[6] __devinitdata = {
123 	0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
124 };
125 
126 #define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
127 #define RUN_AT(x) (jiffies + (x))
128 
129 // For reading/writing 32-bit words from/to DMA memory
130 #define cpu_to_dma32 cpu_to_be32
131 #define dma32_to_cpu be32_to_cpu
132 
133 struct au1000_private *au_macs[NUM_ETH_INTERFACES];
134 
135 /* FIXME
136  * All of the PHY code really should be detached from the MAC
137  * code.
138  */
139 
140 static char *phy_link[] =
141 	{"unknown",
142 	"10Base2", "10BaseT",
143 	"AUI",
144 	"100BaseT", "100BaseTX", "100BaseFX"
145 	};
146 
147 int bcm_5201_init(struct net_device *dev, int phy_addr)
148 {
149 	s16 data;
150 
151 	/* Stop auto-negotiation */
152 	data = mdio_read(dev, phy_addr, MII_CONTROL);
153 	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
154 
155 	/* Set advertisement to 10/100 and Half/Full duplex
156 	 * (full capabilities) */
157 	data = mdio_read(dev, phy_addr, MII_ANADV);
158 	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
159 	mdio_write(dev, phy_addr, MII_ANADV, data);
160 
161 	/* Restart auto-negotiation */
162 	data = mdio_read(dev, phy_addr, MII_CONTROL);
163 	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
164 	mdio_write(dev, phy_addr, MII_CONTROL, data);
165 
166 	if (au1000_debug > 4)
167 		dump_mii(dev, phy_addr);
168 	return 0;
169 }
170 
171 int bcm_5201_reset(struct net_device *dev, int phy_addr)
172 {
173 	s16 mii_control, timeout;
174 
175 	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
176 	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
177 	mdelay(1);
178 	for (timeout = 100; timeout > 0; --timeout) {
179 		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
180 		if ((mii_control & MII_CNTL_RESET) == 0)
181 			break;
182 		mdelay(1);
183 	}
184 	if (mii_control & MII_CNTL_RESET) {
185 		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
186 		return -1;
187 	}
188 	return 0;
189 }
190 
191 int
192 bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
193 {
194 	u16 mii_data;
195 	struct au1000_private *aup;
196 
197 	if (!dev) {
198 		printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
199 		return -1;
200 	}
201 	aup = (struct au1000_private *) dev->priv;
202 
203 	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
204 	if (mii_data & MII_STAT_LINK) {
205 		*link = 1;
206 		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
207 		if (mii_data & MII_AUX_100) {
208 			if (mii_data & MII_AUX_FDX) {
209 				*speed = IF_PORT_100BASEFX;
210 				dev->if_port = IF_PORT_100BASEFX;
211 			}
212 			else {
213 				*speed = IF_PORT_100BASETX;
214 				dev->if_port = IF_PORT_100BASETX;
215 			}
216 		}
217 		else  {
218 			*speed = IF_PORT_10BASET;
219 			dev->if_port = IF_PORT_10BASET;
220 		}
221 
222 	}
223 	else {
224 		*link = 0;
225 		*speed = 0;
226 		dev->if_port = IF_PORT_UNKNOWN;
227 	}
228 	return 0;
229 }
230 
231 int lsi_80227_init(struct net_device *dev, int phy_addr)
232 {
233 	if (au1000_debug > 4)
234 		printk("lsi_80227_init\n");
235 
236 	/* restart auto-negotiation */
237 	mdio_write(dev, phy_addr, MII_CONTROL,
238 		   MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
239 	mdelay(1);
240 
241 	/* set up LEDs to correct display */
242 #ifdef CONFIG_MIPS_MTX1
243 	mdio_write(dev, phy_addr, 17, 0xff80);
244 #else
245 	mdio_write(dev, phy_addr, 17, 0xffc0);
246 #endif
247 
248 	if (au1000_debug > 4)
249 		dump_mii(dev, phy_addr);
250 	return 0;
251 }
252 
253 int lsi_80227_reset(struct net_device *dev, int phy_addr)
254 {
255 	s16 mii_control, timeout;
256 
257 	if (au1000_debug > 4) {
258 		printk("lsi_80227_reset\n");
259 		dump_mii(dev, phy_addr);
260 	}
261 
262 	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
263 	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
264 	mdelay(1);
265 	for (timeout = 100; timeout > 0; --timeout) {
266 		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
267 		if ((mii_control & MII_CNTL_RESET) == 0)
268 			break;
269 		mdelay(1);
270 	}
271 	if (mii_control & MII_CNTL_RESET) {
272 		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
273 		return -1;
274 	}
275 	return 0;
276 }
277 
278 int
279 lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
280 {
281 	u16 mii_data;
282 	struct au1000_private *aup;
283 
284 	if (!dev) {
285 		printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
286 		return -1;
287 	}
288 	aup = (struct au1000_private *) dev->priv;
289 
290 	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
291 	if (mii_data & MII_STAT_LINK) {
292 		*link = 1;
293 		mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
294 		if (mii_data & MII_LSI_PHY_STAT_SPD) {
295 			if (mii_data & MII_LSI_PHY_STAT_FDX) {
296 				*speed = IF_PORT_100BASEFX;
297 				dev->if_port = IF_PORT_100BASEFX;
298 			}
299 			else {
300 				*speed = IF_PORT_100BASETX;
301 				dev->if_port = IF_PORT_100BASETX;
302 			}
303 		}
304 		else  {
305 			*speed = IF_PORT_10BASET;
306 			dev->if_port = IF_PORT_10BASET;
307 		}
308 
309 	}
310 	else {
311 		*link = 0;
312 		*speed = 0;
313 		dev->if_port = IF_PORT_UNKNOWN;
314 	}
315 	return 0;
316 }
317 
318 int am79c901_init(struct net_device *dev, int phy_addr)
319 {
320 	printk("am79c901_init\n");
321 	return 0;
322 }
323 
324 int am79c901_reset(struct net_device *dev, int phy_addr)
325 {
326 	printk("am79c901_reset\n");
327 	return 0;
328 }
329 
330 int
331 am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
332 {
333 	return 0;
334 }
335 
336 int am79c874_init(struct net_device *dev, int phy_addr)
337 {
338 	s16 data;
339 
340 	/* The 79c874 has bit assignments quite similar to the BCM5201 */
341 	if (au1000_debug > 4)
342 		printk("am79c874_init\n");
343 
344 	/* Stop auto-negotiation */
345 	data = mdio_read(dev, phy_addr, MII_CONTROL);
346 	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
347 
348 	/* Set advertisement to 10/100 and Half/Full duplex
349 	 * (full capabilities) */
350 	data = mdio_read(dev, phy_addr, MII_ANADV);
351 	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
352 	mdio_write(dev, phy_addr, MII_ANADV, data);
353 
354 	/* Restart auto-negotiation */
355 	data = mdio_read(dev, phy_addr, MII_CONTROL);
356 	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
357 
358 	mdio_write(dev, phy_addr, MII_CONTROL, data);
359 
360 	if (au1000_debug > 4) dump_mii(dev, phy_addr);
361 	return 0;
362 }
363 
364 int am79c874_reset(struct net_device *dev, int phy_addr)
365 {
366 	s16 mii_control, timeout;
367 
368 	if (au1000_debug > 4)
369 		printk("am79c874_reset\n");
370 
371 	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
372 	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
373 	mdelay(1);
374 	for (timeout = 100; timeout > 0; --timeout) {
375 		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
376 		if ((mii_control & MII_CNTL_RESET) == 0)
377 			break;
378 		mdelay(1);
379 	}
380 	if (mii_control & MII_CNTL_RESET) {
381 		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
382 		return -1;
383 	}
384 	return 0;
385 }
386 
387 int
388 am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
389 {
390 	u16 mii_data;
391 	struct au1000_private *aup;
392 
393 	// printk("am79c874_status\n");
394 	if (!dev) {
395 		printk(KERN_ERR "am79c874_status error: NULL dev\n");
396 		return -1;
397 	}
398 
399 	aup = (struct au1000_private *) dev->priv;
400 	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
401 
402 	if (mii_data & MII_STAT_LINK) {
403 		*link = 1;
404 		mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
405 		if (mii_data & MII_AMD_PHY_STAT_SPD) {
406 			if (mii_data & MII_AMD_PHY_STAT_FDX) {
407 				*speed = IF_PORT_100BASEFX;
408 				dev->if_port = IF_PORT_100BASEFX;
409 			}
410 			else {
411 				*speed = IF_PORT_100BASETX;
412 				dev->if_port = IF_PORT_100BASETX;
413 			}
414 		}
415 		else {
416 			*speed = IF_PORT_10BASET;
417 			dev->if_port = IF_PORT_10BASET;
418 		}
419 
420 	}
421 	else {
422 		*link = 0;
423 		*speed = 0;
424 		dev->if_port = IF_PORT_UNKNOWN;
425 	}
426 	return 0;
427 }
428 
429 int lxt971a_init(struct net_device *dev, int phy_addr)
430 {
431 	if (au1000_debug > 4)
432 		printk("lxt971a_init\n");
433 
434 	/* restart auto-negotiation */
435 	mdio_write(dev, phy_addr, MII_CONTROL,
436 		   MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
437 
438 	/* set up LEDs to correct display */
439 	mdio_write(dev, phy_addr, 20, 0x0422);
440 
441 	if (au1000_debug > 4)
442 		dump_mii(dev, phy_addr);
443 	return 0;
444 }
445 
446 int lxt971a_reset(struct net_device *dev, int phy_addr)
447 {
448 	s16 mii_control, timeout;
449 
450 	if (au1000_debug > 4) {
451 		printk("lxt971a_reset\n");
452 		dump_mii(dev, phy_addr);
453 	}
454 
455 	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
456 	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
457 	mdelay(1);
458 	for (timeout = 100; timeout > 0; --timeout) {
459 		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
460 		if ((mii_control & MII_CNTL_RESET) == 0)
461 			break;
462 		mdelay(1);
463 	}
464 	if (mii_control & MII_CNTL_RESET) {
465 		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
466 		return -1;
467 	}
468 	return 0;
469 }
470 
471 int
472 lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
473 {
474 	u16 mii_data;
475 	struct au1000_private *aup;
476 
477 	if (!dev) {
478 		printk(KERN_ERR "lxt971a_status error: NULL dev\n");
479 		return -1;
480 	}
481 	aup = (struct au1000_private *) dev->priv;
482 
483 	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
484 	if (mii_data & MII_STAT_LINK) {
485 		*link = 1;
486 		mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
487 		if (mii_data & MII_INTEL_PHY_STAT_SPD) {
488 			if (mii_data & MII_INTEL_PHY_STAT_FDX) {
489 				*speed = IF_PORT_100BASEFX;
490 				dev->if_port = IF_PORT_100BASEFX;
491 			}
492 			else {
493 				*speed = IF_PORT_100BASETX;
494 				dev->if_port = IF_PORT_100BASETX;
495 			}
496 		}
497 		else  {
498 			*speed = IF_PORT_10BASET;
499 			dev->if_port = IF_PORT_10BASET;
500 		}
501 
502 	}
503 	else {
504 		*link = 0;
505 		*speed = 0;
506 		dev->if_port = IF_PORT_UNKNOWN;
507 	}
508 	return 0;
509 }
510 
511 int ks8995m_init(struct net_device *dev, int phy_addr)
512 {
513 	s16 data;
514 
515 //	printk("ks8995m_init\n");
516 	/* Stop auto-negotiation */
517 	data = mdio_read(dev, phy_addr, MII_CONTROL);
518 	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
519 
520 	/* Set advertisement to 10/100 and Half/Full duplex
521 	 * (full capabilities) */
522 	data = mdio_read(dev, phy_addr, MII_ANADV);
523 	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
524 	mdio_write(dev, phy_addr, MII_ANADV, data);
525 
526 	/* Restart auto-negotiation */
527 	data = mdio_read(dev, phy_addr, MII_CONTROL);
528 	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
529 	mdio_write(dev, phy_addr, MII_CONTROL, data);
530 
531 	if (au1000_debug > 4) dump_mii(dev, phy_addr);
532 
533 	return 0;
534 }
535 
536 int ks8995m_reset(struct net_device *dev, int phy_addr)
537 {
538 	s16 mii_control, timeout;
539 
540 //	printk("ks8995m_reset\n");
541 	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
542 	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
543 	mdelay(1);
544 	for (timeout = 100; timeout > 0; --timeout) {
545 		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
546 		if ((mii_control & MII_CNTL_RESET) == 0)
547 			break;
548 		mdelay(1);
549 	}
550 	if (mii_control & MII_CNTL_RESET) {
551 		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
552 		return -1;
553 	}
554 	return 0;
555 }
556 
557 int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
558 {
559 	u16 mii_data;
560 	struct au1000_private *aup;
561 
562 	if (!dev) {
563 		printk(KERN_ERR "ks8995m_status error: NULL dev\n");
564 		return -1;
565 	}
566 	aup = (struct au1000_private *) dev->priv;
567 
568 	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
569 	if (mii_data & MII_STAT_LINK) {
570 		*link = 1;
571 		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
572 		if (mii_data & MII_AUX_100) {
573 			if (mii_data & MII_AUX_FDX) {
574 				*speed = IF_PORT_100BASEFX;
575 				dev->if_port = IF_PORT_100BASEFX;
576 			}
577 			else {
578 				*speed = IF_PORT_100BASETX;
579 				dev->if_port = IF_PORT_100BASETX;
580 			}
581 		}
582 		else  {
583 			*speed = IF_PORT_10BASET;
584 			dev->if_port = IF_PORT_10BASET;
585 		}
586 
587 	}
588 	else {
589 		*link = 0;
590 		*speed = 0;
591 		dev->if_port = IF_PORT_UNKNOWN;
592 	}
593 	return 0;
594 }
595 
596 #ifdef CONFIG_MIPS_BOSPORUS
597 int stub_init(struct net_device *dev, int phy_addr)
598 {
599 	//printk("PHY stub_init\n");
600 	return 0;
601 }
602 
603 int stub_reset(struct net_device *dev, int phy_addr)
604 {
605 	//printk("PHY stub_reset\n");
606 	return 0;
607 }
608 
609 int
610 stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
611 {
612 	//printk("PHY stub_status\n");
613 	*link = 1;
614 	/* hmmm, revisit */
615 	*speed = IF_PORT_100BASEFX;
616 	dev->if_port = IF_PORT_100BASEFX;
617 	return 0;
618 }
619 #endif
620 
621 struct phy_ops bcm_5201_ops = {
622 	bcm_5201_init,
623 	bcm_5201_reset,
624 	bcm_5201_status,
625 };
626 
627 struct phy_ops am79c874_ops = {
628 	am79c874_init,
629 	am79c874_reset,
630 	am79c874_status,
631 };
632 
633 struct phy_ops am79c901_ops = {
634 	am79c901_init,
635 	am79c901_reset,
636 	am79c901_status,
637 };
638 
639 struct phy_ops lsi_80227_ops = {
640 	lsi_80227_init,
641 	lsi_80227_reset,
642 	lsi_80227_status,
643 };
644 
645 struct phy_ops lxt971a_ops = {
646 	lxt971a_init,
647 	lxt971a_reset,
648 	lxt971a_status,
649 };
650 
651 struct phy_ops ks8995m_ops = {
652 	ks8995m_init,
653 	ks8995m_reset,
654 	ks8995m_status,
655 };
656 
657 #ifdef CONFIG_MIPS_BOSPORUS
658 struct phy_ops stub_ops = {
659 	stub_init,
660 	stub_reset,
661 	stub_status,
662 };
663 #endif
664 
665 static struct mii_chip_info {
666 	const char * name;
667 	u16 phy_id0;
668 	u16 phy_id1;
669 	struct phy_ops *phy_ops;
670 	int dual_phy;
671 } mii_chip_table[] = {
672 	{"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
673 	{"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
674 	{"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
675 	{"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
676 	{"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
677 	{"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
678 	{"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
679 	{"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
680 #ifdef CONFIG_MIPS_BOSPORUS
681 	{"Stub", 0x1234, 0x5678, &stub_ops },
682 #endif
683 	{0,},
684 };
685 
686 static int mdio_read(struct net_device *dev, int phy_id, int reg)
687 {
688 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
689 	volatile u32 *mii_control_reg;
690 	volatile u32 *mii_data_reg;
691 	u32 timedout = 20;
692 	u32 mii_control;
693 
694 	#ifdef CONFIG_BCM5222_DUAL_PHY
695 	/* First time we probe, it's for the mac0 phy.
696 	 * Since we haven't determined yet that we have a dual phy,
697 	 * aup->mii->mii_control_reg won't be setup and we'll
698 	 * default to the else statement.
699 	 * By the time we probe for the mac1 phy, the mii_control_reg
700 	 * will be setup to be the address of the mac0 phy control since
701 	 * both phys are controlled through mac0.
702 	 */
703 	if (aup->mii && aup->mii->mii_control_reg) {
704 		mii_control_reg = aup->mii->mii_control_reg;
705 		mii_data_reg = aup->mii->mii_data_reg;
706 	}
707 	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
708 		/* assume both phys are controlled through mac0 */
709 		mii_control_reg = au_macs[0]->mii->mii_control_reg;
710 		mii_data_reg = au_macs[0]->mii->mii_data_reg;
711 	}
712 	else
713 	#endif
714 	{
715 		/* default control and data reg addresses */
716 		mii_control_reg = &aup->mac->mii_control;
717 		mii_data_reg = &aup->mac->mii_data;
718 	}
719 
720 	while (*mii_control_reg & MAC_MII_BUSY) {
721 		mdelay(1);
722 		if (--timedout == 0) {
723 			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
724 					dev->name);
725 			return -1;
726 		}
727 	}
728 
729 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
730 		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
731 
732 	*mii_control_reg = mii_control;
733 
734 	timedout = 20;
735 	while (*mii_control_reg & MAC_MII_BUSY) {
736 		mdelay(1);
737 		if (--timedout == 0) {
738 			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
739 					dev->name);
740 			return -1;
741 		}
742 	}
743 	return (int)*mii_data_reg;
744 }
745 
746 static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
747 {
748 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
749 	volatile u32 *mii_control_reg;
750 	volatile u32 *mii_data_reg;
751 	u32 timedout = 20;
752 	u32 mii_control;
753 
754 	#ifdef CONFIG_BCM5222_DUAL_PHY
755 	if (aup->mii && aup->mii->mii_control_reg) {
756 		mii_control_reg = aup->mii->mii_control_reg;
757 		mii_data_reg = aup->mii->mii_data_reg;
758 	}
759 	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
760 		/* assume both phys are controlled through mac0 */
761 		mii_control_reg = au_macs[0]->mii->mii_control_reg;
762 		mii_data_reg = au_macs[0]->mii->mii_data_reg;
763 	}
764 	else
765 	#endif
766 	{
767 		/* default control and data reg addresses */
768 		mii_control_reg = &aup->mac->mii_control;
769 		mii_data_reg = &aup->mac->mii_data;
770 	}
771 
772 	while (*mii_control_reg & MAC_MII_BUSY) {
773 		mdelay(1);
774 		if (--timedout == 0) {
775 			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
776 					dev->name);
777 			return;
778 		}
779 	}
780 
781 	mii_control = MAC_SET_MII_SELECT_REG(reg) |
782 		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
783 
784 	*mii_data_reg = value;
785 	*mii_control_reg = mii_control;
786 }
787 
788 
789 static void dump_mii(struct net_device *dev, int phy_id)
790 {
791 	int i, val;
792 
793 	for (i = 0; i < 7; i++) {
794 		if ((val = mdio_read(dev, phy_id, i)) >= 0)
795 			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
796 	}
797 	for (i = 16; i < 25; i++) {
798 		if ((val = mdio_read(dev, phy_id, i)) >= 0)
799 			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
800 	}
801 }
802 
803 static int mii_probe(struct net_device *dev)
804 {
805 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
806 	int phy_addr;
807 #ifdef CONFIG_MIPS_BOSPORUS
808 	int phy_found=0;
809 #endif
810 
811 	/* search for total of 32 possible mii phy addresses */
812 	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
813 		u16 mii_status;
814 		u16 phy_id0, phy_id1;
815 		int i;
816 
817 		#ifdef CONFIG_BCM5222_DUAL_PHY
818 		/* Mask the already found phy, try next one */
819 		if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
820 			if (au_macs[0]->phy_addr == phy_addr)
821 				continue;
822 		}
823 		#endif
824 
825 		mii_status = mdio_read(dev, phy_addr, MII_STATUS);
826 		if (mii_status == 0xffff || mii_status == 0x0000)
827 			/* the mii is not accessible, try next one */
828 			continue;
829 
830 		phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
831 		phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
832 
833 		/* search our mii table for the current mii */
834 		for (i = 0; mii_chip_table[i].phy_id1; i++) {
835 			if (phy_id0 == mii_chip_table[i].phy_id0 &&
836 			    phy_id1 == mii_chip_table[i].phy_id1) {
837 				struct mii_phy * mii_phy = aup->mii;
838 
839 				printk(KERN_INFO "%s: %s at phy address %d\n",
840 				       dev->name, mii_chip_table[i].name,
841 				       phy_addr);
842 #ifdef CONFIG_MIPS_BOSPORUS
843 				phy_found = 1;
844 #endif
845 				mii_phy->chip_info = mii_chip_table+i;
846 				aup->phy_addr = phy_addr;
847 				aup->phy_ops = mii_chip_table[i].phy_ops;
848 				aup->phy_ops->phy_init(dev,phy_addr);
849 
850 				// Check for dual-phy and then store required
851 				// values and set indicators. We need to do
852 				// this now since mdio_{read,write} need the
853 				// control and data register addresses.
854 				#ifdef CONFIG_BCM5222_DUAL_PHY
855 				if ( mii_chip_table[i].dual_phy) {
856 
857 					/* assume both phys are controlled
858 					 * through MAC0. Board specific? */
859 
860 					/* sanity check */
861 					if (!au_macs[0] || !au_macs[0]->mii)
862 						return -1;
863 					aup->mii->mii_control_reg = (u32 *)
864 						&au_macs[0]->mac->mii_control;
865 					aup->mii->mii_data_reg = (u32 *)
866 						&au_macs[0]->mac->mii_data;
867 				}
868 				#endif
869 				goto found;
870 			}
871 		}
872 	}
873 found:
874 
875 #ifdef CONFIG_MIPS_BOSPORUS
876 	/* This is a workaround for the Micrel/Kendin 5 port switch
877 	   The second MAC doesn't see a PHY connected... so we need to
878 	   trick it into thinking we have one.
879 
880 	   If this kernel is run on another Au1500 development board
881 	   the stub will be found as well as the actual PHY. However,
882 	   the last found PHY will be used... usually at Addr 31 (Db1500).
883 	*/
884 	if ( (!phy_found) )
885 	{
886 		u16 phy_id0, phy_id1;
887 		int i;
888 
889 		phy_id0 = 0x1234;
890 		phy_id1 = 0x5678;
891 
892 		/* search our mii table for the current mii */
893 		for (i = 0; mii_chip_table[i].phy_id1; i++) {
894 			if (phy_id0 == mii_chip_table[i].phy_id0 &&
895 			    phy_id1 == mii_chip_table[i].phy_id1) {
896 				struct mii_phy * mii_phy;
897 
898 				printk(KERN_INFO "%s: %s at phy address %d\n",
899 				       dev->name, mii_chip_table[i].name,
900 				       phy_addr);
901 				mii_phy = kmalloc(sizeof(struct mii_phy),
902 						GFP_KERNEL);
903 				if (mii_phy) {
904 					mii_phy->chip_info = mii_chip_table+i;
905 					aup->phy_addr = phy_addr;
906 					mii_phy->next = aup->mii;
907 					aup->phy_ops =
908 						mii_chip_table[i].phy_ops;
909 					aup->mii = mii_phy;
910 					aup->phy_ops->phy_init(dev,phy_addr);
911 				} else {
912 					printk(KERN_ERR "%s: out of memory\n",
913 							dev->name);
914 					return -1;
915 				}
920 				break;
921 			}
922 		}
923 	}
924 	if (aup->mac_id == 0) {
925 		/* the Bosporus phy responds to addresses 0-5 but
926 		 * 5 is the correct one.
927 		 */
928 		aup->phy_addr = 5;
929 	}
930 #endif
931 
932 	if (aup->mii == NULL) {
933 		printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
934 				dev->name);
935 		return -1;
936 	}
937 
938 	printk(KERN_INFO "%s: Using %s as default\n",
939 			dev->name, aup->mii->chip_info->name);
940 
941 	return 0;
942 }
943 
944 
945 /*
946  * Buffer allocation/deallocation routines. The buffer descriptor returned
947  * has the virtual and dma address of a buffer suitable for
948  * both receive and transmit operations.
949  */
950 static db_dest_t *GetFreeDB(struct au1000_private *aup)
951 {
952 	db_dest_t *pDB;
953 	pDB = aup->pDBfree;
954 
955 	if (pDB) {
956 		aup->pDBfree = pDB->pnext;
957 	}
958 	return pDB;
959 }
960 
961 void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
962 {
963 	/* push this descriptor back onto the head of the free list */
964 	pDB->pnext = aup->pDBfree;
965 	aup->pDBfree = pDB;
967 }
968 
969 
970 /*
971   DMA memory allocation, derived from pci_alloc_consistent.
972   However, the Au1000 data cache is coherent (when programmed
973   so), therefore we return KSEG0 address, not KSEG1.
974 */
975 static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
976 {
977 	void *ret;
978 	int gfp = GFP_ATOMIC | GFP_DMA;
979 
980 	ret = (void *) __get_free_pages(gfp, get_order(size));
981 
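	/* On success, hand back the bus/physical address for the MAC but a
	 * cached KSEG0 pointer for the CPU (the data cache is coherent,
	 * as noted above). */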
982 	if (ret != NULL) {
983 		memset(ret, 0, size);
984 		*dma_handle = virt_to_bus(ret);
985 		ret = (void *)KSEG0ADDR(ret);
986 	}
987 	return ret;
988 }
989 
990 
991 static void dma_free(void *vaddr, size_t size)
992 {
993 	vaddr = (void *)KSEG0ADDR(vaddr);
994 	free_pages((unsigned long) vaddr, get_order(size));
995 }
996 
997 
998 static void enable_rx_tx(struct net_device *dev)
999 {
1000 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1001 
1002 	if (au1000_debug > 4)
1003 		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1004 
1005 	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1006 	au_sync_delay(10);
1007 }
1008 
1009 static void hard_stop(struct net_device *dev)
1010 {
1011 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1012 
1013 	if (au1000_debug > 4)
1014 		printk(KERN_INFO "%s: hard stop\n", dev->name);
1015 
1016 	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1017 	au_sync_delay(10);
1018 }
1019 
1020 
1021 static void reset_mac(struct net_device *dev)
1022 {
1023 	int i;
1024 	u32 flags;
1025 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1026 
1027 	if (au1000_debug > 4)
1028 		printk(KERN_INFO "%s: reset mac, aup %x\n",
1029 				dev->name, (unsigned)aup);
1030 
1031 	spin_lock_irqsave(&aup->lock, flags);
1032 	del_timer(&aup->timer);
1033 	hard_stop(dev);
1034 	#ifdef CONFIG_BCM5222_DUAL_PHY
1035 	if (aup->mac_id != 0) {
1036 	#endif
1037 		/* If BCM5222, we can't leave MAC0 in reset because then
1038 		 * we can't access the dual phy for ETH1 */
1039 		*aup->enable = MAC_EN_CLOCK_ENABLE;
1040 		au_sync_delay(2);
1041 		*aup->enable = 0;
1042 		au_sync_delay(2);
1043 	#ifdef CONFIG_BCM5222_DUAL_PHY
1044 	}
1045 	#endif
1046 	aup->tx_full = 0;
1047 	for (i = 0; i < NUM_RX_DMA; i++) {
1048 		/* reset control bits */
1049 		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1050 	}
1051 	for (i = 0; i < NUM_TX_DMA; i++) {
1052 		/* reset control bits */
1053 		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1054 	}
1055 	spin_unlock_irqrestore(&aup->lock, flags);
1056 }
1057 
1058 
1059 /*
1060  * Setup the receive and transmit "rings".  These pointers are the addresses
1061  * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1062  * these are not descriptors sitting in memory.
1063  */
1064 static void
1065 setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1066 {
1067 	int i;
1068 
1069 	for (i = 0; i < NUM_RX_DMA; i++) {
1070 		aup->rx_dma_ring[i] =
1071 			(volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1072 	}
1073 	for (i = 0; i < NUM_TX_DMA; i++) {
1074 		aup->tx_dma_ring[i] =
1075 			(volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1076 	}
1077 }
1078 
1079 static struct {
1080 	int port;
1081 	u32 base_addr;
1082 	u32 macen_addr;
1083 	int irq;
1084 	struct net_device *dev;
1085 } iflist[2];
1086 
1087 static int num_ifs;
1088 
1089 /*
1090  * Setup the base address and interrupt of the Au1xxx ethernet macs
1091  * based on cpu type and whether the interface is enabled in sys_pinfunc
1092  * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1093  */
1094 static int __init au1000_init_module(void)
1095 {
1096 	struct cpuinfo_mips *c = &current_cpu_data;
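	/* ni is 1 when SYS_PF_NI2 (bit 4) is set, i.e. the last interface
	 * is disabled (see comment above), so one less MAC is probed. */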
1097 	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1098 	struct net_device *dev;
1099 	int i, found_one = 0;
1100 
1101 	switch (c->cputype) {
1102 #ifdef CONFIG_SOC_AU1000
1103 	case CPU_AU1000:
1104 		num_ifs = 2 - ni;
1105 		iflist[0].base_addr = AU1000_ETH0_BASE;
1106 		iflist[1].base_addr = AU1000_ETH1_BASE;
1107 		iflist[0].macen_addr = AU1000_MAC0_ENABLE;
1108 		iflist[1].macen_addr = AU1000_MAC1_ENABLE;
1109 		iflist[0].irq = AU1000_MAC0_DMA_INT;
1110 		iflist[1].irq = AU1000_MAC1_DMA_INT;
1111 		break;
1112 #endif
1113 #ifdef CONFIG_SOC_AU1100
1114 	case CPU_AU1100:
1115 		num_ifs = 1 - ni;
1116 		iflist[0].base_addr = AU1100_ETH0_BASE;
1117 		iflist[0].macen_addr = AU1100_MAC0_ENABLE;
1118 		iflist[0].irq = AU1100_MAC0_DMA_INT;
1119 		break;
1120 #endif
1121 #ifdef CONFIG_SOC_AU1500
1122 	case CPU_AU1500:
1123 		num_ifs = 2 - ni;
1124 		iflist[0].base_addr = AU1500_ETH0_BASE;
1125 		iflist[1].base_addr = AU1500_ETH1_BASE;
1126 		iflist[0].macen_addr = AU1500_MAC0_ENABLE;
1127 		iflist[1].macen_addr = AU1500_MAC1_ENABLE;
1128 		iflist[0].irq = AU1500_MAC0_DMA_INT;
1129 		iflist[1].irq = AU1500_MAC1_DMA_INT;
1130 		break;
1131 #endif
1132 #ifdef CONFIG_SOC_AU1550
1133 	case CPU_AU1550:
1134 		num_ifs = 2 - ni;
1135 		iflist[0].base_addr = AU1550_ETH0_BASE;
1136 		iflist[1].base_addr = AU1550_ETH1_BASE;
1137 		iflist[0].macen_addr = AU1550_MAC0_ENABLE;
1138 		iflist[1].macen_addr = AU1550_MAC1_ENABLE;
1139 		iflist[0].irq = AU1550_MAC0_DMA_INT;
1140 		iflist[1].irq = AU1550_MAC1_DMA_INT;
1141 		break;
1142 #endif
1143 	default:
1144 		num_ifs = 0;
1145 	}
1146 	for(i = 0; i < num_ifs; i++) {
1147 		dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
1148 		iflist[i].dev = dev;
1149 		if (dev)
1150 			found_one++;
1151 	}
1152 	if (!found_one)
1153 		return -ENODEV;
1154 	return 0;
1155 }
1156 
1157 static struct net_device *
1158 au1000_probe(u32 ioaddr, int irq, int port_num)
1159 {
1160 	static unsigned version_printed = 0;
1161 	struct au1000_private *aup = NULL;
1162 	struct net_device *dev = NULL;
1163 	db_dest_t *pDB, *pDBfree;
1164 	char *pmac, *argptr;
1165 	char ethaddr[6];
1166 	int i, err;
1167 
1168 	if (!request_region(ioaddr, MAC_IOSIZE, "Au1x00 ENET"))
1169 		return NULL;
1170 
1171 	if (version_printed++ == 0)
1172 		printk(version);
1173 
1174 	dev = alloc_etherdev(sizeof(struct au1000_private));
1175 	if (!dev) {
1176 		printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
1177 		return NULL;
1178 	}
1179 
1180 	if ((err = register_netdev(dev))) {
1181 		printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
1182 				err);
1183 		kfree(dev);
1184 		return NULL;
1185 	}
1186 
1187 	printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
1188 			dev->name, ioaddr, irq);
1189 
1190 	aup = dev->priv;
1191 
1192 	/* Allocate the data buffers */
1193 	aup->vaddr = (u32)dma_alloc(MAX_BUF_SIZE *
1194 			(NUM_TX_BUFFS+NUM_RX_BUFFS), &aup->dma_addr);
1195 	if (!aup->vaddr) {
1196 		kfree(dev);
1197 		release_region(ioaddr, MAC_IOSIZE);
1198 		return NULL;
1199 	}
1200 
1201 	/* aup->mac is the base address of the MAC's registers */
1202 	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
1203 	/* Setup some variables for quick register address access */
1204 	if (ioaddr == iflist[0].base_addr)
1205 	{
1206 		/* check env variables first */
1207 		if (!get_ethernet_addr(ethaddr)) {
1208 			memcpy(au1000_mac_addr, ethaddr, sizeof(dev->dev_addr));
1209 		} else {
1210 			/* Check command line */
1211 			argptr = prom_getcmdline();
1212 			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
1213 				printk(KERN_INFO "%s: No mac address found\n",
1214 						dev->name);
1215 				/* use the hard coded mac addresses */
1216 			} else {
1217 				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1218 				memcpy(au1000_mac_addr, ethaddr,
1219 						sizeof(dev->dev_addr));
1220 			}
1221 		}
1222 		aup->enable = (volatile u32 *)
1223 			((unsigned long)iflist[0].macen_addr);
1224 		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
1225 		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1226 		aup->mac_id = 0;
1227 		au_macs[0] = aup;
1228 	}
1229 	else if (ioaddr == iflist[1].base_addr)
1230 	{
1231 		aup->enable = (volatile u32 *)
1232 			((unsigned long)iflist[1].macen_addr);
1234 		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(dev->dev_addr));
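	/* give the second MAC a distinct address by bumping an octet of
	 * the shared fallback address */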
1235 		dev->dev_addr[4] += 0x10;
1236 		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1237 		aup->mac_id = 1;
1238 		au_macs[1] = aup;
1239 	}
1240 	else
1241 	{
1242 		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
1243 	}
1244 
1245 	/* bring the device out of reset, otherwise probing the mii
1246 	 * will hang */
1247 	*aup->enable = MAC_EN_CLOCK_ENABLE;
1248 	au_sync_delay(2);
1249 	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1250 		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1251 	au_sync_delay(2);
1252 
1253 	aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1254 	if (!aup->mii) {
1255 		printk(KERN_ERR "%s: out of memory\n", dev->name);
1256 		goto err_out;
1257 	}
1258 	aup->mii->mii_control_reg = 0;
1259 	aup->mii->mii_data_reg = 0;
1260 
1261 	if (mii_probe(dev) != 0) {
1262 		goto err_out;
1263 	}
1264 
1265 	pDBfree = NULL;
1266 	/* setup the data buffer descriptors and attach a buffer to each one */
1267 	pDB = aup->db;
1268 	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1269 		pDB->pnext = pDBfree;
1270 		pDBfree = pDB;
1271 		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1272 		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1273 		pDB++;
1274 	}
1275 	aup->pDBfree = pDBfree;
1276 
1277 	for (i = 0; i < NUM_RX_DMA; i++) {
1278 		pDB = GetFreeDB(aup);
1279 		if (!pDB) {
1280 			goto err_out;
1281 		}
1282 		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1283 		aup->rx_db_inuse[i] = pDB;
1284 	}
1285 	for (i = 0; i < NUM_TX_DMA; i++) {
1286 		pDB = GetFreeDB(aup);
1287 		if (!pDB) {
1288 			goto err_out;
1289 		}
1290 		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1291 		aup->tx_dma_ring[i]->len = 0;
1292 		aup->tx_db_inuse[i] = pDB;
1293 	}
1294 
1295 	spin_lock_init(&aup->lock);
1296 	dev->base_addr = ioaddr;
1297 	dev->irq = irq;
1298 	dev->open = au1000_open;
1299 	dev->hard_start_xmit = au1000_tx;
1300 	dev->stop = au1000_close;
1301 	dev->get_stats = au1000_get_stats;
1302 	dev->set_multicast_list = &set_rx_mode;
1303 	dev->do_ioctl = &au1000_ioctl;
1304 	dev->set_config = &au1000_set_config;
1305 	dev->tx_timeout = au1000_tx_timeout;
1306 	dev->watchdog_timeo = ETH_TX_TIMEOUT;
1307 
1308 	/*
1309 	 * The boot code uses the ethernet controller, so reset it to start
1310 	 * fresh.  au1000_init() expects that the device is in reset state.
1311 	 */
1312 	reset_mac(dev);
1313 
1314 	return dev;
1315 
1316 err_out:
1317 	/* here we should have a valid dev plus aup-> register addresses
1318 	 * so we can reset the mac properly.*/
1319 	reset_mac(dev);
1320 	if (aup->mii)
1321 		kfree(aup->mii);
1322 	for (i = 0; i < NUM_RX_DMA; i++) {
1323 		if (aup->rx_db_inuse[i])
1324 			ReleaseDB(aup, aup->rx_db_inuse[i]);
1325 	}
1326 	for (i = 0; i < NUM_TX_DMA; i++) {
1327 		if (aup->tx_db_inuse[i])
1328 			ReleaseDB(aup, aup->tx_db_inuse[i]);
1329 	}
1330 	dma_free((void *)aup->vaddr, MAX_BUF_SIZE *
1331 			(NUM_TX_BUFFS+NUM_RX_BUFFS));
1332 	unregister_netdev(dev);
1333 	kfree(dev);
1334 	release_region(ioaddr, MAC_IOSIZE);
1335 	return NULL;
1336 }
1337 
1338 /*
1339  * Initialize the interface.
1340  *
1341  * When the device powers up, the clocks are disabled and the
1342  * mac is in reset state.  When the interface is closed, we
1343  * do the same -- reset the device and disable the clocks to
1344  * conserve power. Thus, whenever au1000_init() is called,
1345  * the device should already be in reset state.
1346  */
1347 static int au1000_init(struct net_device *dev)
1348 {
1349 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1350 	u32 flags;
1351 	int i;
1352 	u32 control;
1353 	u16 link, speed;
1354 
1355 	if (au1000_debug > 4)
1356 		printk("%s: au1000_init\n", dev->name);
1357 
1358 	spin_lock_irqsave(&aup->lock, flags);
1359 
1360 	/* bring the device out of reset */
1361 	*aup->enable = MAC_EN_CLOCK_ENABLE;
1362 	au_sync_delay(2);
1363 	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1364 		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1365 	au_sync_delay(20);
1366 
1367 	aup->mac->control = 0;
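	/* Bits 3:2 of the first descriptor's buffer/status word appear to
	 * hold the index of the descriptor the MAC will service next, so
	 * start the software head/tail indices there. */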
1368 	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1369 	aup->tx_tail = aup->tx_head;
1370 	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1371 
1372 	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1373 	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1374 		dev->dev_addr[1]<<8 | dev->dev_addr[0];
1375 
1376 	for (i = 0; i < NUM_RX_DMA; i++) {
1377 		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1378 	}
1379 	au_sync();
1380 
1381 	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1382 	control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1383 #ifndef CONFIG_CPU_LITTLE_ENDIAN
1384 	control |= MAC_BIG_ENDIAN;
1385 #endif
1386 	if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1387 		control |= MAC_FULL_DUPLEX;
1388 	}
1389 	aup->mac->control = control;
1390 	aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1391 	au_sync();
1392 
1393 	spin_unlock_irqrestore(&aup->lock, flags);
1394 	return 0;
1395 }
1396 
1397 static void au1000_timer(unsigned long data)
1398 {
1399 	struct net_device *dev = (struct net_device *)data;
1400 	struct au1000_private *aup;
1401 	unsigned char if_port;
1402 	u16 link, speed;
1403 
1404 	if (!dev) {
1405 		/* fatal error, don't restart the timer */
1406 		printk(KERN_ERR "au1000_timer error: NULL dev\n");
1407 		return;
1408 	}
1409 	aup = (struct au1000_private *) dev->priv;
1410 	if_port = dev->if_port;
1411 	if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1412 		if (link) {
1413 			if (!(dev->flags & IFF_RUNNING)) {
1414 				netif_carrier_on(dev);
1415 				dev->flags |= IFF_RUNNING;
1416 				printk(KERN_INFO "%s: link up\n", dev->name);
1417 			}
1418 		}
1419 		else {
1420 			if (dev->flags & IFF_RUNNING) {
1421 				netif_carrier_off(dev);
1422 				dev->flags &= ~IFF_RUNNING;
1423 				dev->if_port = 0;
1424 				printk(KERN_INFO "%s: link down\n", dev->name);
1425 			}
1426 		}
1427 	}
1428 
1429 	if (link && (dev->if_port != if_port) &&
1430 			(dev->if_port != IF_PORT_UNKNOWN)) {
1431 		hard_stop(dev);
1432 		if (dev->if_port == IF_PORT_100BASEFX) {
1433 			printk(KERN_INFO "%s: going to full duplex\n",
1434 					dev->name);
1435 			aup->mac->control |= MAC_FULL_DUPLEX;
1436 			au_sync_delay(1);
1437 		}
1438 		else {
1439 			aup->mac->control &= ~MAC_FULL_DUPLEX;
1440 			au_sync_delay(1);
1441 		}
1442 		enable_rx_tx(dev);
1443 	}
1444 
1445 	aup->timer.expires = RUN_AT((1*HZ));
1446 	aup->timer.data = (unsigned long)dev;
1447 	aup->timer.function = &au1000_timer; /* timer handler */
1448 	add_timer(&aup->timer);
1449 
1450 }
1451 
1452 static int au1000_open(struct net_device *dev)
1453 {
1454 	int retval;
1455 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1456 
1457 	MOD_INC_USE_COUNT;
1458 
1459 	if (au1000_debug > 4)
1460 		printk("%s: open: dev=%p\n", dev->name, dev);
1461 
1462 	if ((retval = au1000_init(dev))) {
1463 		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1464 		free_irq(dev->irq, dev);
1465 		MOD_DEC_USE_COUNT;
1466 		return retval;
1467 	}
1468 	netif_start_queue(dev);
1469 
1470 	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1471 					dev->name, dev))) {
1472 		printk(KERN_ERR "%s: unable to get IRQ %d\n",
1473 				dev->name, dev->irq);
1474 		MOD_DEC_USE_COUNT;
1475 		return retval;
1476 	}
1477 
1478 	aup->timer.expires = RUN_AT((3*HZ));
1479 	aup->timer.data = (unsigned long)dev;
1480 	aup->timer.function = &au1000_timer; /* timer handler */
1481 	add_timer(&aup->timer);
1482 
1483 	if (au1000_debug > 4)
1484 		printk("%s: open: Initialization done.\n", dev->name);
1485 
1486 	return 0;
1487 }
1488 
1489 static int au1000_close(struct net_device *dev)
1490 {
1491 	u32 flags;
1492 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1493 
1494 	if (au1000_debug > 4)
1495 		printk("%s: close: dev=%p\n", dev->name, dev);
1496 
1497 	reset_mac(dev);
1498 
1499 	spin_lock_irqsave(&aup->lock, flags);
1500 
1501 	/* stop the device */
1502 	netif_stop_queue(dev);
1503 
1504 	/* disable the interrupt */
1505 	free_irq(dev->irq, dev);
1506 	spin_unlock_irqrestore(&aup->lock, flags);
1507 
1508 	MOD_DEC_USE_COUNT;
1509 	return 0;
1510 }
1511 
1512 static void __exit au1000_cleanup_module(void)
1513 {
1514 	int i, j;
1515 	struct net_device *dev;
1516 	struct au1000_private *aup;
1517 
1518 	for (i = 0; i < num_ifs; i++) {
1519 		dev = iflist[i].dev;
1520 		if (dev) {
1521 			aup = (struct au1000_private *) dev->priv;
1522 			unregister_netdev(dev);
1523 			if (aup->mii)
1524 				kfree(aup->mii);
1525 			for (j = 0; j < NUM_RX_DMA; j++) {
1526 				if (aup->rx_db_inuse[j])
1527 					ReleaseDB(aup, aup->rx_db_inuse[j]);
1528 			}
1529 			for (j = 0; j < NUM_TX_DMA; j++) {
1530 				if (aup->tx_db_inuse[j])
1531 					ReleaseDB(aup, aup->tx_db_inuse[j]);
1532 			}
1533 			dma_free((void *)aup->vaddr, MAX_BUF_SIZE *
1534 					(NUM_TX_BUFFS+NUM_RX_BUFFS));
1535 			kfree(dev);
1536 			release_region(iflist[i].base_addr, MAC_IOSIZE);
1537 		}
1538 	}
1539 }
1540 
1541 static void update_tx_stats(struct net_device *dev, u32 status)
1542 {
1543 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1544 	struct net_device_stats *ps = &aup->stats;
1545 
1546 	if (status & TX_FRAME_ABORTED) {
1547 		if (dev->if_port == IF_PORT_100BASEFX) {
1548 			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1549 				/* any other tx errors are only valid
1550 				 * in half duplex mode */
1551 				ps->tx_errors++;
1552 				ps->tx_aborted_errors++;
1553 			}
1554 		}
1555 		else {
1556 			ps->tx_errors++;
1557 			ps->tx_aborted_errors++;
1558 			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1559 				ps->tx_carrier_errors++;
1560 		}
1561 	}
1562 }
1563 
1564 
1565 /*
1566  * Called from the interrupt service routine to acknowledge
1567  * the TX DONE bits.  This is a must if the irq is setup as
1568  * edge triggered.
1569  */
1570 static void au1000_tx_ack(struct net_device *dev)
1571 {
1572 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1573 	volatile tx_dma_t *ptxd;
1574 
1575 	ptxd = aup->tx_dma_ring[aup->tx_tail];
1576 
1577 	while (ptxd->buff_stat & TX_T_DONE) {
1578 		update_tx_stats(dev, ptxd->status);
1579 		ptxd->buff_stat &= ~TX_T_DONE;
1580 		ptxd->len = 0;
1581 		au_sync();
1582 
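		/* NUM_TX_DMA is a power of two (four descriptors), so the
		 * mask simply wraps the ring index */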
1583 		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1584 		ptxd = aup->tx_dma_ring[aup->tx_tail];
1585 
1586 		if (aup->tx_full) {
1587 			aup->tx_full = 0;
1588 			netif_wake_queue(dev);
1589 		}
1590 	}
1591 }
1592 
1593 
1594 /*
1595  * Au1000 transmit routine.
1596  */
1597 static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1598 {
1599 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1600 	struct net_device_stats *ps = &aup->stats;
1601 	volatile tx_dma_t *ptxd;
1602 	u32 buff_stat;
1603 	db_dest_t *pDB;
1604 	int i;
1605 
1606 	if (au1000_debug > 5)
1607 		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1608 				dev->name, (unsigned)aup, skb->len,
1609 				skb->data, aup->tx_head);
1610 
1611 	ptxd = aup->tx_dma_ring[aup->tx_head];
1612 	buff_stat = ptxd->buff_stat;
1613 	if (buff_stat & TX_DMA_ENABLE) {
1614 		/* We've wrapped around and the transmitter is still busy */
1615 		netif_stop_queue(dev);
1616 		aup->tx_full = 1;
1617 		return 1;
1618 	}
1619 	else if (buff_stat & TX_T_DONE) {
1620 		update_tx_stats(dev, ptxd->status);
1621 		ptxd->len = 0;
1622 	}
1623 
1624 	if (aup->tx_full) {
1625 		aup->tx_full = 0;
1626 		netif_wake_queue(dev);
1627 	}
1628 
1629 	pDB = aup->tx_db_inuse[aup->tx_head];
1630 	memcpy((void *)pDB->vaddr, skb->data, skb->len);
1631 	if (skb->len < ETH_ZLEN) {
1632 		for (i=skb->len; i<ETH_ZLEN; i++) {
1633 			((char *)pDB->vaddr)[i] = 0;
1634 		}
1635 		ptxd->len = ETH_ZLEN;
1636 	}
1637 	else
1638 		ptxd->len = skb->len;
1639 
1640 	ps->tx_packets++;
1641 	ps->tx_bytes += ptxd->len;
1642 
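	/* hand the buffer back to the MAC; setting the DMA enable bit
	 * starts transmission of this descriptor */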
1643 	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1644 	au_sync();
1645 	dev_kfree_skb(skb);
1646 	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1647 	dev->trans_start = jiffies;
1648 	return 0;
1649 }
1650 
1651 static inline void update_rx_stats(struct net_device *dev, u32 status)
1652 {
1653 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1654 	struct net_device_stats *ps = &aup->stats;
1655 
1656 	ps->rx_packets++;
1657 	if (status & RX_MCAST_FRAME)
1658 		ps->multicast++;
1659 
1660 	if (status & RX_ERROR) {
1661 		ps->rx_errors++;
1662 		if (status & RX_MISSED_FRAME)
1663 			ps->rx_missed_errors++;
1664 		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
1665 			ps->rx_length_errors++;
1666 		if (status & RX_CRC_ERROR)
1667 			ps->rx_crc_errors++;
1668 		if (status & RX_COLL)
1669 			ps->collisions++;
1670 	}
1671 	else
1672 		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1673 
1674 }
1675 
1676 /*
1677  * Au1000 receive routine.
1678  */
1679 static int au1000_rx(struct net_device *dev)
1680 {
1681 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1682 	struct sk_buff *skb;
1683 	volatile rx_dma_t *prxd;
1684 	u32 buff_stat, status;
1685 	db_dest_t *pDB;
1686 	u32	frmlen;
1687 
1688 	if (au1000_debug > 5)
1689 		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1690 
1691 	prxd = aup->rx_dma_ring[aup->rx_head];
1692 	buff_stat = prxd->buff_stat;
1693 	while (buff_stat & RX_T_DONE)  {
1694 		status = prxd->status;
1695 		pDB = aup->rx_db_inuse[aup->rx_head];
1696 		update_rx_stats(dev, status);
1697 		if (!(status & RX_ERROR))  {
1698 
1699 			/* good frame */
1700 			frmlen = (status & RX_FRAME_LEN_MASK);
1701 			frmlen -= 4; /* Remove FCS */
1702 			skb = dev_alloc_skb(frmlen + 2);
1703 			if (skb == NULL) {
1704 				printk(KERN_ERR
1705 				       "%s: Memory squeeze, dropping packet.\n",
1706 				       dev->name);
1707 				aup->stats.rx_dropped++;
1708 				continue;
1709 			}
1710 			skb->dev = dev;
1711 			skb_reserve(skb, 2);	/* 16 byte IP header align */
1712 			eth_copy_and_sum(skb,
1713 				(unsigned char *)pDB->vaddr, frmlen, 0);
1714 			skb_put(skb, frmlen);
1715 			skb->protocol = eth_type_trans(skb, dev);
1716 			netif_rx(skb);	/* pass the packet to upper layers */
1717 		}
1718 		else {
1719 			if (au1000_debug > 4) {
1720 				if (status & RX_MISSED_FRAME)
1721 					printk("rx miss\n");
1722 				if (status & RX_WDOG_TIMER)
1723 					printk("rx wdog\n");
1724 				if (status & RX_RUNT)
1725 					printk("rx runt\n");
1726 				if (status & RX_OVERLEN)
1727 					printk("rx overlen\n");
1728 				if (status & RX_COLL)
1729 					printk("rx coll\n");
1730 				if (status & RX_MII_ERROR)
1731 					printk("rx mii error\n");
1732 				if (status & RX_CRC_ERROR)
1733 					printk("rx crc error\n");
1734 				if (status & RX_LEN_ERROR)
1735 					printk("rx len error\n");
1736 				if (status & RX_U_CNTRL_FRAME)
1737 					printk("rx u control frame\n");
1738 				if (status & RX_MISSED_FRAME)
1739 					printk("rx miss\n");
1740 			}
1741 		}
1742 		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
1743 		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
1744 		au_sync();
1745 
1746 		/* next descriptor */
1747 		prxd = aup->rx_dma_ring[aup->rx_head];
1748 		buff_stat = prxd->buff_stat;
1749 		dev->last_rx = jiffies;
1750 	}
1751 	return 0;
1752 }
1753 
1754 
1755 /*
1756  * Au1000 interrupt service routine.
1757  */
1758 void au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1759 {
1760 	struct net_device *dev = (struct net_device *) dev_id;
1761 
1762 	if (dev == NULL) {
1763 		printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
1764 		return;
1765 	}
1766 
1767 	/* Handle RX interrupts first to minimize chance of overrun */
1768 
1769 	au1000_rx(dev);
1770 	au1000_tx_ack(dev);
1771 }
1772 
1773 
1774 /*
1775  * The Tx ring has been full longer than the watchdog timeout
1776  * value; the transmitter is presumably hung.
1777  */
1778 static void au1000_tx_timeout(struct net_device *dev)
1779 {
1780 	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
1781 	reset_mac(dev);
1782 	au1000_init(dev);
1783 	dev->trans_start = jiffies;
1784 	netif_wake_queue(dev);
1785 }
1786 
1787 
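/* Ethernet CRC-32 (polynomial 0x04c11db7); set_rx_mode() below uses the
 * top six bits of the result to select a bit in the 64-bit multicast
 * hash filter. */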
1788 static unsigned const ethernet_polynomial = 0x04c11db7U;
1789 static inline u32 ether_crc(int length, unsigned char *data)
1790 {
1791     int crc = -1;
1792 
1793     while(--length >= 0) {
1794 		unsigned char current_octet = *data++;
1795 		int bit;
1796 		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
1797 			crc = (crc << 1) ^
1798 				((crc < 0) ^ (current_octet & 1) ?
1799 				 ethernet_polynomial : 0);
1800     }
1801     return crc;
1802 }
1803 
1804 static void set_rx_mode(struct net_device *dev)
1805 {
1806 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1807 
1808 	if (au1000_debug > 4)
1809 		printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
1810 
1811 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1812 		aup->mac->control |= MAC_PROMISCUOUS;
1813 		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1814 	} else if ((dev->flags & IFF_ALLMULTI)  ||
1815 			   dev->mc_count > MULTICAST_FILTER_LIMIT) {
1816 		aup->mac->control |= MAC_PASS_ALL_MULTI;
1817 		aup->mac->control &= ~MAC_PROMISCUOUS;
1818 		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
1819 	} else {
1820 		int i;
1821 		struct dev_mc_list *mclist;
1822 		u32 mc_filter[2];	/* Multicast hash filter */
1823 
1824 		mc_filter[1] = mc_filter[0] = 0;
1825 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1826 			 i++, mclist = mclist->next) {
1827 			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
1828 					mc_filter);
1829 		}
1830 		aup->mac->multi_hash_high = mc_filter[1];
1831 		aup->mac->multi_hash_low = mc_filter[0];
1832 		aup->mac->control &= ~MAC_PROMISCUOUS;
1833 		aup->mac->control |= MAC_HASH_MODE;
1834 	}
1835 }
1836 
1837 
1838 static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1839 {
1840 	//u16 *data = (u16 *)&rq->ifr_data;
1841 
1842 	/* fixme */
1843 	switch(cmd) {
1844 		case SIOCDEVPRIVATE:	/* Get the address of the PHY in use. */
1845 		//data[0] = PHY_ADDRESS;
1846 		case SIOCDEVPRIVATE+1:	/* Read the specified MII register. */
1847 		//data[3] = mdio_read(ioaddr, data[0], data[1]);
1848 		return 0;
1849 		case SIOCDEVPRIVATE+2:	/* Write the specified MII register */
1850 		//mdio_write(ioaddr, data[0], data[1], data[2]);
1851 		return 0;
1852 		default:
1853 		return -EOPNOTSUPP;
1854 	}
1855 }
1856 
1857 
1858 static int au1000_set_config(struct net_device *dev, struct ifmap *map)
1859 {
1860 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1861 	u16 control;
1862 
1863 	if (au1000_debug > 4)  {
1864 		printk("%s: set_config called: dev->if_port %d map->port %x\n",
1865 				dev->name, dev->if_port, map->port);
1866 	}
1867 
1868 	switch(map->port){
1869 		case IF_PORT_UNKNOWN: /* use auto here */
1870 			printk(KERN_INFO "%s: config phy for aneg\n",
1871 					dev->name);
1872 			dev->if_port = map->port;
1873 			/* Link Down: the timer will bring it up */
1874 			netif_carrier_off(dev);
1875 
1876 			/* read current control */
1877 			control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
1878 			control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
1879 
1880 			/* enable auto negotiation and reset the negotiation */
1881 			mdio_write(dev, aup->phy_addr, MII_CONTROL,
1882 					control | MII_CNTL_AUTO |
1883 					MII_CNTL_RST_AUTO);
1884 
1885 			break;
1886 
1887 		case IF_PORT_10BASET: /* 10BaseT */
1888 			printk(KERN_INFO "%s: config phy for 10BaseT\n",
1889 					dev->name);
1890 			dev->if_port = map->port;
1891 
1892 			/* Link Down: the timer will bring it up */
1893 			netif_carrier_off(dev);
1894 
1895 			/* set Speed to 10Mbps, Half Duplex */
1896 			control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
1897 			control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
1898 					MII_CNTL_FDX);
1899 
1900 			/* disable auto negotiation and force 10M/HD mode*/
1901 			mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
1902 			break;
1903 
1904 		case IF_PORT_100BASET: /* 100BaseT */
1905 		case IF_PORT_100BASETX: /* 100BaseTx */
1906 			printk(KERN_INFO "%s: config phy for 100BaseTX\n",
1907 					dev->name);
1908 			dev->if_port = map->port;
1909 
1910 			/* Link Down: the timer will bring it up */
1911 			netif_carrier_off(dev);
1912 
1913 			/* set Speed to 100Mbps, Half Duplex */
1914 			/* disable auto negotiation and enable 100MBit Mode */
1915 			control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
1916 			control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
1917 			control |= MII_CNTL_F100;
1918 			mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
1919 			break;
1920 
1921 		case IF_PORT_100BASEFX: /* 100BaseFx */
1922 			printk(KERN_INFO "%s: config phy for 100BaseFX\n",
1923 					dev->name);
1924 			dev->if_port = map->port;
1925 
1926 			/* Link Down: the timer will bring it up */
1927 			netif_carrier_off(dev);
1928 
1929 			/* set Speed to 100Mbps, Full Duplex */
1930 			/* disable auto negotiation and enable 100MBit Mode */
1931 			control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
1932 			control &= ~MII_CNTL_AUTO;
1933 			control |=  MII_CNTL_F100 | MII_CNTL_FDX;
1934 			mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
1935 			break;
1936 		case IF_PORT_10BASE2: /* 10Base2 */
1937 		case IF_PORT_AUI: /* AUI */
1938 		/* These Modes are not supported (are they?)*/
1939 			printk(KERN_ERR "%s: 10Base2/AUI not supported",
1940 					dev->name);
1941 			return -EOPNOTSUPP;
1942 			break;
1943 
1944 		default:
1945 			printk(KERN_ERR "%s: Invalid media selected",
1946 					dev->name);
1947 			return -EINVAL;
1948 	}
1949 	return 0;
1950 }
1951 
1952 static struct net_device_stats *au1000_get_stats(struct net_device *dev)
1953 {
1954 	struct au1000_private *aup = (struct au1000_private *) dev->priv;
1955 
1956 	if (au1000_debug > 4)
1957 		printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
1958 
1959 	if (netif_device_present(dev)) {
1960 		return &aup->stats;
1961 	}
1962 	return 0;
1963 }
1964 
1965 module_init(au1000_init_module);
1966 module_exit(au1000_cleanup_module);
1967