1 /*
2     A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3     ethernet driver for Linux.
4     Copyright (C) 1997  Sten Wang
5 
6     This program is free software; you can redistribute it and/or
7     modify it under the terms of the GNU General Public License
8     as published by the Free Software Foundation; either version 2
9     of the License, or (at your option) any later version.
10 
11     This program is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15 
16     DAVICOM Web-Site: www.davicom.com.tw
17 
18     Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19     Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20 
21     (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22 
23     Marcelo Tosatti <marcelo@conectiva.com.br> :
24     Made it compile in 2.3 (device to net_device)
25 
26     Alan Cox <alan@redhat.com> :
27     Cleaned up for kernel merge.
28     Removed the back compatibility support
29     Reformatted, fixing spelling etc as I went
30     Removed IRQ 0-15 assumption
31 
32     Jeff Garzik <jgarzik@pobox.com> :
33     Updated to use new PCI driver API.
34     Resource usage cleanups.
35     Report driver version to user.
36 
37     Tobias Ringstrom <tori@unhappy.mine.nu> :
38     Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
39     Andrew Morton and Frank Davis for the SMP safety fixes.
40 
41     Vojtech Pavlik <vojtech@suse.cz> :
42     Cleaned up pointer arithmetics.
43     Fixed a lot of 64bit issues.
44     Cleaned up printk()s a bit.
45     Fixed some obvious big endian problems.
46 
47     Tobias Ringstrom <tori@unhappy.mine.nu> :
48     Use time_after for jiffies calculation.  Added ethtool
49     support.  Updated PCI resource allocation.  Do not
50     forget to unmap PCI mapped skbs.
51 
52     Alan Cox <alan@redhat.com>
53     Added new PCI identifiers provided by Clear Zhang at ALi
54     for their 1563 ethernet device.
55 
56     TODO
57 
58     Implement pci_driver::suspend() and pci_driver::resume()
59     power management methods.
60 
61     Check on 64 bit boxes.
62     Check and fix on big endian boxes.
63 
64     Test and make sure PCI latency is now correct for all cases.
65 */
66 
/* Driver identity strings, reported in the load banner and log output */
#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"
70 
71 #include <linux/module.h>
72 
73 #include <linux/kernel.h>
74 #include <linux/sched.h>
75 #include <linux/string.h>
76 #include <linux/timer.h>
77 #include <linux/ptrace.h>
78 #include <linux/errno.h>
79 #include <linux/ioport.h>
80 #include <linux/slab.h>
81 #include <linux/interrupt.h>
82 #include <linux/pci.h>
83 #include <linux/init.h>
84 #include <linux/version.h>
85 #include <linux/netdevice.h>
86 #include <linux/etherdevice.h>
87 #include <linux/ethtool.h>
88 #include <linux/skbuff.h>
89 #include <linux/delay.h>
90 #include <linux/spinlock.h>
91 #include <linux/crc32.h>
92 
93 #include <asm/processor.h>
94 #include <asm/bitops.h>
95 #include <asm/io.h>
96 #include <asm/dma.h>
97 #include <asm/uaccess.h>
98 
99 
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */

#define DM9102_IO_SIZE  0x80            /* I/O window of the DM9102 */
#define DM9102A_IO_SIZE 0x100           /* I/O window of DM9132 / newer DM9102A */
#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packet per time */
#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC    0x600           /* Tx buffer size per descriptor */
#define RX_ALLOC_SIZE   0x620           /* Rx skb DMA mapping size */
#define DM910X_RESET    1               /* software-reset bit written to CR0 */
#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
#define CR6_DEFAULT     0x00080000      /* HD */
#define CR7_DEFAULT     0x180c1         /* interrupt-enable mask */
#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE 1514            /* largest frame accepted for Tx */
#define DMFE_MAX_MULTICAST 14           /* multicast slots in the setup frame */
#define RX_COPY_SIZE	100		/* copybreak: shorter Rx frames copied to a fresh skb */
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

/* media_mode / op_mode encoding (bit0: 100M, bit2: full duplex) */
#define DMFE_10MHF      0               /* 10Mbps half duplex */
#define DMFE_100MHF     1               /* 100Mbps half duplex */
#define DMFE_10MFD      4               /* 10Mbps full duplex */
#define DMFE_100MFD     5               /* 100Mbps full duplex */
#define DMFE_AUTO       8               /* auto-negotiation */
#define DMFE_1M_HPNA    0x10            /* 1Mbps HomePNA mode */

/* CR6 Tx threshold field values */
#define DMFE_TXTH_72	0x400000	/* TX TH 72 byte */
#define DMFE_TXTH_96	0x404000	/* TX TH 96 byte */
#define DMFE_TXTH_128	0x0000		/* TX TH 128 byte */
#define DMFE_TXTH_256	0x4000		/* TX TH 256 byte */
#define DMFE_TXTH_512	0x8000		/* TX TH 512 byte */
#define DMFE_TXTH_1K	0xC000		/* TX TH 1K  byte */

#define DMFE_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)	/* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK 	(HZ/2)	/* tx packet Kick-out time 0.5 s */

/* Conditional debug printout: fires when dmfe_debug is set or dbug_now is nonzero */
#define DMFE_DBUG(dbug_now, msg, value) if (dmfe_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))

#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");


/* CR9 definition: SROM/MII */
#define CR9_SROM_READ   0x4800
#define CR9_SRCS        0x1
#define CR9_SRCLK       0x2
#define CR9_CRDOUT      0x8
#define SROM_DATA_0     0x0
#define SROM_DATA_1     0x4
#define PHY_DATA_1      0x20000
#define PHY_DATA_0      0x00000
#define MDCLKH          0x10000

#define PHY_POWER_DOWN	0x800

#define SROM_V41_CODE   0x14

/* Clock one data bit out on the serial SROM interface via CR9 */
#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);

/* DM9132, and DM9102A revisions >= 0x02000030, expose the larger I/O window */
#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
#define CHK_IO_SIZE(pci_dev, dev_rev) __CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)

/* Sten Check */
#define DEVICE net_device
174 
175 /* Structure/enum declaration ------------------------------- */
/* Hardware Tx descriptor.  tdes0-tdes3 are read/written by the chip
 * (stored little-endian; tdes0 bit 31 is the owner bit); the trailing
 * fields are driver-private bookkeeping.  Aligned to 32 bytes so each
 * descriptor occupies its own slot in the DMA pool. */
struct tx_desc {
        u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
        char *tx_buf_ptr;               /* Data for us */
        struct tx_desc *next_tx_desc;   /* software ring linkage */
} __attribute__(( aligned(32) ));
181 
/* Hardware Rx descriptor.  rdes0-rdes3 belong to the chip (rdes0
 * bit 31 is the owner bit, rdes2 holds the buffer DMA address); the
 * trailing fields are driver-private bookkeeping. */
struct rx_desc {
	u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
	struct sk_buff *rx_skb_ptr;	/* Data for us */
	struct rx_desc *next_rx_desc;	/* software ring linkage */
} __attribute__(( aligned(32) ));
187 
/*
 * Per-adapter private state, stored in net_device->priv.  "lock"
 * serializes the xmit, interrupt and filter-setup paths that share
 * the descriptor rings and the CR register shadow values below.
 */
struct dmfe_board_info {
	u32 chip_id;			/* Chip vendor/Device ID */
	u32 chip_revision;		/* Chip revision */
	struct DEVICE *next_dev;	/* next device */
	struct pci_dev *pdev;		/* PCI device */
	spinlock_t lock;

	long ioaddr;			/* I/O base address */
	u32 cr0_data;			/* shadow copies of chip CR registers */
	u32 cr5_data;
	u32 cr6_data;
	u32 cr7_data;
	u32 cr15_data;

	/* pointer for memory physical address */
	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
	dma_addr_t first_tx_desc_dma;
	dma_addr_t first_rx_desc_dma;

	/* descriptor pointer */
	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
	struct tx_desc *first_tx_desc;
	struct tx_desc *tx_insert_ptr;	/* next free Tx descriptor */
	struct tx_desc *tx_remove_ptr;	/* oldest in-flight Tx descriptor */
	struct rx_desc *first_rx_desc;
	struct rx_desc *rx_insert_ptr;
	struct rx_desc *rx_ready_ptr;	/* packet come pointer */
	unsigned long tx_packet_cnt;	/* transmitted packet count */
	unsigned long tx_queue_cnt;	/* wait to send packet count */
	unsigned long rx_avail_cnt;	/* available rx descriptor count */
	unsigned long interval_rx_cnt;	/* rx packet count a callback time */

	u16 HPNA_command;		/* For HPNA register 16 */
	u16 HPNA_timer;			/* For HPNA remote device check */
	u16 dbug_cnt;
	u16 NIC_capability;		/* NIC media capability */
	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */

	u8 HPNA_present;		/* 0:none, 1:DM9801, 2:DM9802 */
	u8 chip_type;			/* Keep DM9102A chip type */
	u8 media_mode;			/* user specify media mode */
	u8 op_mode;			/* real work media mode */
	u8 phy_addr;			/* MII PHY address (1 on these chips) */
	u8 link_failed;			/* Ever link failed */
	u8 wait_reset;			/* Hardware failed, need to reset */
	u8 dm910x_chk_mode;		/* Operating mode check */
	u8 first_in_callback;		/* Flag to record state */
	struct timer_list timer;	/* driver timer, armed in dmfe_open() */

	/* System defined statistic counter */
	struct net_device_stats stats;

	/* Driver defined statistic counter */
	unsigned long tx_fifo_underrun;
	unsigned long tx_loss_carrier;
	unsigned long tx_no_carrier;
	unsigned long tx_late_collision;
	unsigned long tx_excessive_collision;
	unsigned long tx_jabber_timeout;
	unsigned long reset_count;
	unsigned long reset_cr8;
	unsigned long reset_fatal;
	unsigned long reset_TXtimeout;

	/* NIC SROM data */
	unsigned char srom[128];	/* raw EEPROM image, read at probe time */
};
259 
/* DM910x control/status register offsets from the I/O base.
 * Registers sit on an 8-byte stride. */
enum dmfe_offsets {
	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
	DCR15 = 0x78
};
266 
/* Bit definitions for CR6, the operation mode register */
enum dmfe_CR6_bits {
	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};
272 
/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;	/* nonzero once the banner has printed */
static char version[] __devinitdata =
	KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
	DRV_VERSION " (" DRV_RELDATE ")\n";

static int dmfe_debug;				/* enables DMFE_DBUG output */
static unsigned char dmfe_media_mode = DMFE_AUTO;	/* active media selection */
static u32 dmfe_cr6_user_set;			/* user override OR-ed into CR6 */

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;			/* 8 == DMFE_AUTO */
static u8 chkmode = 1;				/* enable the DM9102 check-mode workaround */
static u8 HPNA_mode;		/* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;		/* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;		/* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;	/* Default: HPNA NoiseFloor */
static u8 SF_mode;		/* Special Function: 1:VLAN, 2:RX Flow Control
				   4: TX pause packet */
294 
295 
/* function declaration ------------------------------------- */
/* net_device entry points */
static int dmfe_open(struct DEVICE *);
static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static struct ethtool_ops netdev_ethtool_ops;
/* SROM, interrupt and descriptor-ring helpers */
static u16 read_srom_word(long ,int);
static void dmfe_interrupt(int , void *, struct pt_regs *);
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE * ,int);
static void dm9132_id_table(struct DEVICE * ,int);
/* MII/PHY bit-bang access */
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
static u16 phy_read_1bit(unsigned long);
/* link management, Rx/Tx processing and chip setup */
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);
329 
/* DM910X network board routine ---------------------------- */
331 
/*
 *	Search for a DM910X board, allocate space and register it
 */
335 
dmfe_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)336 static int __devinit dmfe_init_one (struct pci_dev *pdev,
337 				    const struct pci_device_id *ent)
338 {
339 	struct dmfe_board_info *db;	/* board information structure */
340 	struct net_device *dev;
341 	u32 dev_rev, pci_pmr;
342 	int i, err;
343 
344 	DMFE_DBUG(0, "dmfe_init_one()", 0);
345 
346 	if (!printed_version++)
347 		printk(version);
348 
349 	/* Init network device */
350 	dev = alloc_etherdev(sizeof(*db));
351 	if (dev == NULL)
352 		return -ENOMEM;
353 	SET_MODULE_OWNER(dev);
354 
355 	if (pci_set_dma_mask(pdev, 0xffffffff)) {
356 		printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
357 		err = -ENODEV;
358 		goto err_out_free;
359 	}
360 
361 	/* Enable Master/IO access, Disable memory access */
362 	err = pci_enable_device(pdev);
363 	if (err)
364 		goto err_out_free;
365 
366 	if (!pci_resource_start(pdev, 0)) {
367 		printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
368 		err = -ENODEV;
369 		goto err_out_disable;
370 	}
371 
372 	/* Read Chip revision */
373 	pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
374 
375 	if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
376 		printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
377 		err = -ENODEV;
378 		goto err_out_disable;
379 	}
380 
381 #if 0	/* pci_{enable_device,set_master} sets minimum latency for us now */
382 
383 	/* Set Latency Timer 80h */
384 	/* FIXME: setting values > 32 breaks some SiS 559x stuff.
385 	   Need a PCI quirk.. */
386 
387 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
388 #endif
389 
390 	if (pci_request_regions(pdev, DRV_NAME)) {
391 		printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
392 		err = -ENODEV;
393 		goto err_out_disable;
394 	}
395 
396 	/* Init system & device */
397 	db = dev->priv;
398 
399 	/* Allocate Tx/Rx descriptor memory */
400 	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
401 	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
402 
403 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
404 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
405 	db->buf_pool_start = db->buf_pool_ptr;
406 	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
407 
408 	db->chip_id = ent->driver_data;
409 	db->ioaddr = pci_resource_start(pdev, 0);
410 	db->chip_revision = dev_rev;
411 
412 	db->pdev = pdev;
413 
414 	dev->base_addr = db->ioaddr;
415 	dev->irq = pdev->irq;
416 	pci_set_drvdata(pdev, dev);
417 	dev->open = &dmfe_open;
418 	dev->hard_start_xmit = &dmfe_start_xmit;
419 	dev->stop = &dmfe_stop;
420 	dev->get_stats = &dmfe_get_stats;
421 	dev->set_multicast_list = &dmfe_set_filter_mode;
422 	dev->ethtool_ops = &netdev_ethtool_ops;
423 	spin_lock_init(&db->lock);
424 
425 	pci_read_config_dword(pdev, 0x50, &pci_pmr);
426 	pci_pmr &= 0x70000;
427 	if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
428 		db->chip_type = 1;	/* DM9102A E3 */
429 	else
430 		db->chip_type = 0;
431 
432 	/* read 64 word srom data */
433 	for (i = 0; i < 64; i++)
434 		((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
435 
436 	/* Set Node address */
437 	for (i = 0; i < 6; i++)
438 		dev->dev_addr[i] = db->srom[20 + i];
439 
440 	err = register_netdev (dev);
441 	if (err)
442 		goto err_out_res;
443 
444 	printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
445 		dev->name,
446 		ent->driver_data >> 16,
447 		pdev->slot_name);
448 	for (i = 0; i < 6; i++)
449 		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
450 	printk(", irq %d.\n", dev->irq);
451 
452 	pci_set_master(pdev);
453 
454 	return 0;
455 
456 err_out_res:
457 	pci_release_regions(pdev);
458 err_out_disable:
459 	pci_disable_device(pdev);
460 err_out_free:
461 	pci_set_drvdata(pdev, NULL);
462 	kfree(dev);
463 
464 	return err;
465 }
466 
467 
dmfe_remove_one(struct pci_dev * pdev)468 static void __devexit dmfe_remove_one (struct pci_dev *pdev)
469 {
470 	struct net_device *dev = pci_get_drvdata(pdev);
471 	struct dmfe_board_info *db = dev->priv;
472 
473 	DMFE_DBUG(0, "dmfe_remove_one()", 0);
474 
475  	if (dev) {
476 		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
477 					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
478  					db->desc_pool_dma_ptr);
479 		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
480 					db->buf_pool_ptr, db->buf_pool_dma_ptr);
481 		unregister_netdev(dev);
482 		pci_release_regions(pdev);
483 		kfree(dev);	/* free board information */
484 		pci_set_drvdata(pdev, NULL);
485 	}
486 
487 	DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
488 }
489 
490 
/*
 *	Open the interface.
 *	The interface is opened whenever "ifconfig" activates it.
 */
495 
/* Called via dev->open.  Claims a shared IRQ, resets per-open driver
 * state, chooses the CR0/CR6 operating mode for this chip revision,
 * initializes the hardware and starts the driver timer. */
static int dmfe_open(struct DEVICE *dev)
{
	int ret;
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_open", 0);

	ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
	if (ret)
		return ret;

	/* system variable init */
	db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->wait_reset = 0;

	db->first_in_callback = 0;
	db->NIC_capability = 0xf;	/* All capability */
	db->PHY_reg4 = 0x1e0;

	/* CR6 operation mode decision: newer parts (DM9132, or revision
	   >= 0x02000030) run in normal mode with a 256-byte Tx threshold;
	   older ones use store-and-forward plus the driver's "check
	   mode" workaround */
	if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
		(db->chip_revision >= 0x02000030) ) {
    		db->cr6_data |= DMFE_TXTH_256;
		db->cr0_data = CR0_DEFAULT;
		db->dm910x_chk_mode=4;		/* Enter the normal mode */
 	} else {
		db->cr6_data |= CR6_SFT;	/* Store & Forward mode */
		db->cr0_data = 0;
		db->dm910x_chk_mode = 1;	/* Enter the check mode */
	}

	/* Initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Active System Interface */
	netif_wake_queue(dev);

	/* set and activate the timer (first fires roughly 3 s from now) */
	init_timer(&db->timer);
	db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
	db->timer.data = (unsigned long)dev;
	db->timer.function = &dmfe_timer;
	add_timer(&db->timer);

	return 0;
}
546 
547 
/*	Initialize DM910X board
 *	Reset DM910X board
 *	Initialize TX/Rx descriptor chain structure
 *	Send the set-up frame
 *	Enable Tx/Rx machine
 */
554 
/* Full hardware bring-up sequence; called from dmfe_open() with the
 * device quiescent.  The ordering below (MAC reset, SROM parse, PHY
 * reset, ring setup, setup frame, interrupt enable, Tx/Rx enable)
 * is deliberate — do not reorder. */
static void dmfe_init_dm910x(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;
	unsigned long ioaddr = db->ioaddr;

	DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

	/* Reset DM910x MAC controller */
	outl(DM910X_RESET, ioaddr + DCR0);	/* RESET MAC */
	udelay(100);
	outl(db->cr0_data, ioaddr + DCR0);
	udelay(5);

	/* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
	db->phy_addr = 1;

	/* Parse SROM and set media mode */
	dmfe_parse_srom(db);
	db->media_mode = dmfe_media_mode;

	/* RESET Phyxcer Chip by GPR port bit 7 */
	outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
	if (db->chip_id == PCI_DM9009_ID) {
		outl(0x80, ioaddr + DCR12);	/* Issue RESET signal */
		mdelay(300);			/* Delay 300 ms */
	}
	outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */

	/* Process Phyxcer Media Mode */
	if ( !(db->media_mode & 0x10) )	/* skip when forced to 1M HomePNA mode */
		dmfe_set_phyxcer(db);

	/* Media Mode Process */
	if ( !(db->media_mode & DMFE_AUTO) )
		db->op_mode = db->media_mode; 	/* Force Mode */

	/* Initialize Transmit/Receive descriptor rings and CR3/4 */
	dmfe_descriptor_init(db, ioaddr);

	/* Init CR6 to program DM910x operation */
	update_cr6(db->cr6_data, ioaddr);

	/* Send setup frame to program the address filter */
	if (db->chip_id == PCI_DM9132_ID)
		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
	else
		send_filter_frame(dev, dev->mc_count);	/* DM9102/DM9102A */

	/* Init CR7, interrupt active bit */
	db->cr7_data = CR7_DEFAULT;
	outl(db->cr7_data, ioaddr + DCR7);

	/* Init CR15, Tx jabber and Rx watchdog timer */
	outl(db->cr15_data, ioaddr + DCR15);

	/* Enable DM910X Tx/Rx function */
	db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
	update_cr6(db->cr6_data, ioaddr);
}
614 
615 
616 /*
617  *	Hardware start transmission.
618  *	Send a packet to media from the upper layer.
619  */
620 
dmfe_start_xmit(struct sk_buff * skb,struct DEVICE * dev)621 static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
622 {
623 	struct dmfe_board_info *db = dev->priv;
624 	struct tx_desc *txptr;
625 	unsigned long flags;
626 
627 	DMFE_DBUG(0, "dmfe_start_xmit", 0);
628 
629 	/* Resource flag check */
630 	netif_stop_queue(dev);
631 
632 	/* Too large packet check */
633 	if (skb->len > MAX_PACKET_SIZE) {
634 		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
635 		dev_kfree_skb(skb);
636 		return 0;
637 	}
638 
639 	spin_lock_irqsave(&db->lock, flags);
640 
641 	/* No Tx resource check, it never happen nromally */
642 	if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
643 		spin_unlock_irqrestore(&db->lock, flags);
644 		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_queue_cnt);
645 		return 1;
646 	}
647 
648 	/* Disable NIC interrupt */
649 	outl(0, dev->base_addr + DCR7);
650 
651 	/* transmit this packet */
652 	txptr = db->tx_insert_ptr;
653 	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
654 	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
655 
656 	/* Point to next transmit free descriptor */
657 	db->tx_insert_ptr = txptr->next_tx_desc;
658 
659 	/* Transmit Packet Process */
660 	if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
661 		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
662 		db->tx_packet_cnt++;			/* Ready to send */
663 		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
664 		dev->trans_start = jiffies;		/* saved time stamp */
665 	} else {
666 		db->tx_queue_cnt++;			/* queue TX packet */
667 		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
668 	}
669 
670 	/* Tx resource check */
671 	if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
672 		netif_wake_queue(dev);
673 
674 	/* free this SKB */
675 	dev_kfree_skb(skb);
676 
677 	/* Restore CR7 to enable interrupt */
678 	spin_unlock_irqrestore(&db->lock, flags);
679 	outl(db->cr7_data, dev->base_addr + DCR7);
680 
681 	return 0;
682 }
683 
684 
/*
 *	Stop the interface.
 *	The interface is stopped when it is brought down.
 */
689 
/* Called via dev->stop.  Quiesces the queue and timer, resets the
 * MAC, powers down the PHY, releases the IRQ and the Rx buffers. */
static int dmfe_stop(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;
	unsigned long ioaddr = dev->base_addr;

	DMFE_DBUG(0, "dmfe_stop", 0);

	/* disable system */
	netif_stop_queue(dev);

	/* delete the timer, waiting for any running handler to finish */
	del_timer_sync(&db->timer);

	/* Reset & stop DM910X board */
	outl(DM910X_RESET, ioaddr + DCR0);
	udelay(5);
	/* 0x8000 in PHY register 0: presumably the MII BMCR reset bit --
	   confirm against the PHY datasheet */
	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

	/* free interrupt */
	free_irq(dev->irq, dev);

	/* free allocated rx buffer */
	dmfe_free_rxbuffer(db);

#if 0
	/* show statistic counter */
	printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
		db->tx_fifo_underrun, db->tx_excessive_collision,
		db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
		db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
		db->reset_fatal, db->reset_TXtimeout);
#endif

	return 0;
}
725 
726 
/*
 *	DM9102 interrupt handler
 *	receive the packet to upper layer, free the transmitted packet
 */
731 
dmfe_interrupt(int irq,void * dev_id,struct pt_regs * regs)732 static void dmfe_interrupt(int irq, void *dev_id, struct pt_regs *regs)
733 {
734 	struct DEVICE *dev = dev_id;
735 	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;
736 	unsigned long ioaddr = dev->base_addr;
737 	unsigned long flags;
738 
739 	DMFE_DBUG(0, "dmfe_interrupt()", 0);
740 
741 	if (!dev) {
742 		DMFE_DBUG(1, "dmfe_interrupt() without DEVICE arg", 0);
743 		return;
744 	}
745 
746 	spin_lock_irqsave(&db->lock, flags);
747 
748 	/* Got DM910X status */
749 	db->cr5_data = inl(ioaddr + DCR5);
750 	outl(db->cr5_data, ioaddr + DCR5);
751 	if ( !(db->cr5_data & 0xc1) ) {
752 		spin_unlock_irqrestore(&db->lock, flags);
753 		return;
754 	}
755 
756 	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
757 	outl(0, ioaddr + DCR7);
758 
759 	/* Check system status */
760 	if (db->cr5_data & 0x2000) {
761 		/* system bus error happen */
762 		DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
763 		db->reset_fatal++;
764 		db->wait_reset = 1;	/* Need to RESET */
765 		spin_unlock_irqrestore(&db->lock, flags);
766 		return;
767 	}
768 
769 	 /* Received the coming packet */
770 	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
771 		dmfe_rx_packet(dev, db);
772 
773 	/* reallocate rx descriptor buffer */
774 	if (db->rx_avail_cnt<RX_DESC_CNT)
775 		allocate_rx_buffer(db);
776 
777 	/* Free the transmitted descriptor */
778 	if ( db->cr5_data & 0x01)
779 		dmfe_free_tx_pkt(dev, db);
780 
781 	/* Mode Check */
782 	if (db->dm910x_chk_mode & 0x2) {
783 		db->dm910x_chk_mode = 0x4;
784 		db->cr6_data |= 0x100;
785 		update_cr6(db->cr6_data, db->ioaddr);
786 	}
787 
788 	/* Restore CR7 to enable interrupt mask */
789 	outl(db->cr7_data, ioaddr + DCR7);
790 
791 	spin_unlock_irqrestore(&db->lock, flags);
792 }
793 
794 
795 /*
796  *	Free TX resource after TX complete
797  */
798 
/* Called from dmfe_interrupt() with db->lock held. */
static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct tx_desc *txptr;
	unsigned long ioaddr = dev->base_addr;
	u32 tdes0;

	/* Walk forward from the oldest in-flight descriptor */
	txptr = db->tx_remove_ptr;
	while(db->tx_packet_cnt) {
		tdes0 = le32_to_cpu(txptr->tdes0);
		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
		if (tdes0 & 0x80000000)	/* owner bit still set: chip not done */
			break;

		/* A packet sent completed */
		db->tx_packet_cnt--;
		db->stats.tx_packets++;

		/* Transmit statistic counter */
		if ( tdes0 != 0x7fffffff ) {
			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
			db->stats.collisions += (tdes0 >> 3) & 0xf;
			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
			if (tdes0 & TDES0_ERR_MASK) {
				db->stats.tx_errors++;

				if (tdes0 & 0x0002) {	/* UnderRun */
					db->tx_fifo_underrun++;
					/* switch to store-and-forward so it
					   cannot underrun again */
					if ( !(db->cr6_data & CR6_SFT) ) {
						db->cr6_data = db->cr6_data | CR6_SFT;
						update_cr6(db->cr6_data, db->ioaddr);
					}
				}
				if (tdes0 & 0x0100)
					db->tx_excessive_collision++;
				if (tdes0 & 0x0200)
					db->tx_late_collision++;
				if (tdes0 & 0x0400)
					db->tx_no_carrier++;
				if (tdes0 & 0x0800)
					db->tx_loss_carrier++;
				if (tdes0 & 0x4000)
					db->tx_jabber_timeout++;
			}
		}

    		txptr = txptr->next_tx_desc;
	}/* End of while */

	/* Update TX remove pointer to next */
	db->tx_remove_ptr = txptr;

	/* Send the next queued Tx packet, if any */
	if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
		db->tx_packet_cnt++;			/* Ready to send */
		db->tx_queue_cnt--;
		outl(0x1, ioaddr + DCR1);		/* Issue Tx polling */
		dev->trans_start = jiffies;		/* saved time stamp */
	}

	/* Resource available check */
	if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
		netif_wake_queue(dev);	/* Active upper layer, send again */
}
863 
864 
/*
 *	Calculate the CRC value of the Rx packet
 *	flag = 	1 : return the inverted CRC (for checking a received packet's CRC field)
 *		0 : return the normal CRC (for Hash Table index)
 */
870 
cal_CRC(unsigned char * Data,unsigned int Len,u8 flag)871 static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
872 {
873 	u32 crc = ether_crc_le(Len, Data);
874 	if (flag) crc = ~crc;
875 	return crc;
876 }
877 
878 
/*
 *	Receive the incoming packet and pass it to the upper layer
 */
882 
/* Called from dmfe_interrupt() with db->lock held.  Drains completed
 * Rx descriptors: good frames go to the network stack, anything
 * broken has its skb recycled via dmfe_reuse_skb(). */
static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;
	int rxlen;
	u32 rdes0;

	rxptr = db->rx_ready_ptr;

	while(db->rx_avail_cnt) {
		rdes0 = le32_to_cpu(rxptr->rdes0);
		if (rdes0 & 0x80000000)	/* packet owner check: still owned by the chip */
			break;

		db->rx_avail_cnt--;
		db->interval_rx_cnt++;

		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
		if ( (rdes0 & 0x300) != 0x300) {
			/* A packet without First/Last flag */
			/* reuse this SKB */
			DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
			dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
		} else {
			/* A packet with First/Last flag */
			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;	/* length minus the 4-byte CRC */

			/* error summary bit check */
			if (rdes0 & 0x8000) {
				/* This is a error packet */
				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
				db->stats.rx_errors++;
				if (rdes0 & 1)
					db->stats.rx_fifo_errors++;
				if (rdes0 & 2)
					db->stats.rx_crc_errors++;
				if (rdes0 & 0x80)
					db->stats.rx_length_errors++;
			}

			/* An errored frame is still delivered when promiscuous
			   mode (CR6_PM) is on and the frame is long enough */
			if ( !(rdes0 & 0x8000) ||
				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
				skb = rxptr->rx_skb_ptr;

				/* Received Packet CRC check need or not */
				if ( (db->dm910x_chk_mode & 1) &&
					(cal_CRC(skb->tail, rxlen, 1) !=
					(*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
					/* Found a error received packet */
					dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					db->dm910x_chk_mode = 3;
				} else {
					/* Good packet, send to upper layer */
					/* Short packets are copied into a fresh skb */
					if ( (rxlen < RX_COPY_SIZE) &&
						( (skb = dev_alloc_skb(rxlen + 2) )
						!= NULL) ) {
						/* size less than COPY_SIZE, allocate a rxlen SKB */
						skb->dev = dev;
						skb_reserve(skb, 2); /* 16byte align */
						memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
						dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
					} else {
						/* pass the ring skb up; the ring slot
						   is refilled later by
						   allocate_rx_buffer() */
						skb->dev = dev;
						skb_put(skb, rxlen);
					}
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->last_rx = jiffies;
					db->stats.rx_packets++;
					db->stats.rx_bytes += rxlen;
				}
			} else {
				/* Reuse SKB buffer when the packet is error */
				DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
				dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
			}
		}

		rxptr = rxptr->next_rx_desc;
	}

	db->rx_ready_ptr = rxptr;
}
967 
968 
969 /*
970  *	Get statistics from driver.
971  */
972 
dmfe_get_stats(struct DEVICE * dev)973 static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
974 {
975 	struct dmfe_board_info *db = (struct dmfe_board_info *)dev->priv;
976 
977 	DMFE_DBUG(0, "dmfe_get_stats", 0);
978 	return &db->stats;
979 }
980 
981 
982 /*
983  * Set DM910X multicast address
984  */
985 
dmfe_set_filter_mode(struct DEVICE * dev)986 static void dmfe_set_filter_mode(struct DEVICE * dev)
987 {
988 	struct dmfe_board_info *db = dev->priv;
989 	unsigned long flags;
990 
991 	DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
992 	spin_lock_irqsave(&db->lock, flags);
993 
994 	if (dev->flags & IFF_PROMISC) {
995 		DMFE_DBUG(0, "Enable PROM Mode", 0);
996 		db->cr6_data |= CR6_PM | CR6_PBF;
997 		update_cr6(db->cr6_data, db->ioaddr);
998 		spin_unlock_irqrestore(&db->lock, flags);
999 		return;
1000 	}
1001 
1002 	if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1003 		DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1004 		db->cr6_data &= ~(CR6_PM | CR6_PBF);
1005 		db->cr6_data |= CR6_PAM;
1006 		spin_unlock_irqrestore(&db->lock, flags);
1007 		return;
1008 	}
1009 
1010 	DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1011 	if (db->chip_id == PCI_DM9132_ID)
1012 		dm9132_id_table(dev, dev->mc_count);	/* DM9132 */
1013 	else
1014 		send_filter_frame(dev, dev->mc_count); 	/* DM9102/DM9102A */
1015 	spin_unlock_irqrestore(&db->lock, flags);
1016 }
1017 
1018 
netdev_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1019 static void netdev_get_drvinfo(struct net_device *dev,
1020 			       struct ethtool_drvinfo *info)
1021 {
1022 	struct dmfe_board_info *np = dev->priv;
1023 
1024 	strcpy(info->driver, DRV_NAME);
1025 	strcpy(info->version, DRV_VERSION);
1026 	if (np->pdev)
1027 		strcpy(info->bus_info, pci_name(np->pdev));
1028 	else
1029 		sprintf(info->bus_info, "EISA 0x%lx %d",
1030 			dev->base_addr, dev->irq);
1031 }
1032 
/* Only get_drvinfo is provided; all other ethtool operations use defaults. */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
};
1036 
1037 /*
1038  *	A periodic timer routine
1039  *	Dynamic media sense, allocate Rx buffer...
1040  */
1041 
static void dmfe_timer(unsigned long data)
{
	u32 tmp_cr8;
	unsigned char tmp_cr12;
	struct DEVICE *dev = (struct DEVICE *) data;
	struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;
	unsigned long flags;

	DMFE_DBUG(0, "dmfe_timer()", 0);
	spin_lock_irqsave(&db->lock, flags);

	/* Media mode process when Link OK before enter this route */
	if (db->first_in_callback == 0) {
		db->first_in_callback = 1;
		if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
			/* First pass on a DM9102 with external PHY: briefly
			   drop the MII select bit (CR6 bit18), kick the PHY
			   (reg0 = 0x1000, presumably enable auto-negotiation
			   -- per MII convention, verify against datasheet),
			   restore CR6, and re-arm the timer 2s later. */
			db->cr6_data &= ~0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
			db->cr6_data |= 0x40000;
			update_cr6(db->cr6_data, db->ioaddr);
			db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
			add_timer(&db->timer);
			spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
	}


	/* Operating Mode Check: once enough packets were received cleanly,
	   leave the software CRC re-check mode (bit0) for mode 4 */
	if ( (db->dm910x_chk_mode & 0x1) &&
		(db->stats.rx_packets > MAX_CHECK_PACKET) )
		db->dm910x_chk_mode = 0x4;

	/* Dynamic reset DM910X : system error or transmit time-out.
	   CR8 nonzero while no Rx happened since the last tick is treated
	   as a wedged chip and schedules a reset below. */
	tmp_cr8 = inl(db->ioaddr + DCR8);
	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
		db->reset_cr8++;
		db->wait_reset = 1;
	}
	db->interval_rx_cnt = 0;

	/* TX polling kick monitor: packets pending and no Tx progress for
	   DMFE_TX_KICK jiffies -> poke the Tx poll demand register */
	if ( db->tx_packet_cnt &&
	     time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
		outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */

		/* TX Timeout: still stuck after DMFE_TX_TIMEOUT -> full reset */
		if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
			db->reset_TXtimeout++;
			db->wait_reset = 1;
			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
			       dev->name);
		}
	}

	if (db->wait_reset) {
		/* Reset requested above (or by the ISR): rebuild the NIC,
		   then re-arm the timer and leave */
		DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
		db->reset_count++;
		dmfe_dynamic_reset(dev);
		db->first_in_callback = 0;
		db->timer.expires = DMFE_TIMER_WUT;
		add_timer(&db->timer);
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	/* Link status check, Dynamic media type change */
	if (db->chip_id == PCI_DM9132_ID)
		tmp_cr12 = inb(db->ioaddr + DCR9 + 3);	/* DM9132 */
	else
		tmp_cr12 = inb(db->ioaddr + DCR12);	/* DM9102/DM9102A */

	if ( ((db->chip_id == PCI_DM9102_ID) &&
		(db->chip_revision == 0x02000030)) ||
		((db->chip_id == PCI_DM9132_ID) &&
		(db->chip_revision == 0x02000010)) ) {
		/* DM9102A Chip: this revision reports link in bit1 with
		   inverted sense; normalize so bits 0-1 mean "link up" */
		if (tmp_cr12 & 2)
			tmp_cr12 = 0x0;		/* Link failed */
		else
			tmp_cr12 = 0x3;	/* Link OK */
	}

	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
		/* Link just went down */
		DMFE_DBUG(0, "Link Failed", tmp_cr12);
		db->link_failed = 1;

		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
		/* AUTO or force 1M Homerun/Longrun don't need */
		if ( !(db->media_mode & 0x38) )
			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);

		/* AUTO mode, if INT phyxcer link failed, select EXT device */
		if (db->media_mode & DMFE_AUTO) {
			/* 10/100M link failed, used 1M Home-Net */
			db->cr6_data|=0x00040000;	/* bit18=1, MII */
			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
			update_cr6(db->cr6_data, db->ioaddr);
		}
	} else
		if ((tmp_cr12 & 0x3) && db->link_failed) {
			/* Link just came up: sense the speed and apply it */
			DMFE_DBUG(0, "Link link OK", tmp_cr12);
			db->link_failed = 0;

			/* Auto Sense Speed; failure keeps us in link-down state */
			if ( (db->media_mode & DMFE_AUTO) &&
				dmfe_sense_speed(db) )
				db->link_failed = 1;
			dmfe_process_mode(db);
			/* SHOW_MEDIA_TYPE(db->op_mode); */
		}

	/* HPNA remote command check: countdown to the periodic re-check */
	if (db->HPNA_command & 0xf00) {
		db->HPNA_timer--;
		if (!db->HPNA_timer)
			dmfe_HPNA_remote_cmd_chk(db);
	}

	/* Timer active again */
	db->timer.expires = DMFE_TIMER_WUT;
	add_timer(&db->timer);
	spin_unlock_irqrestore(&db->lock, flags);
}
1167 
1168 
1169 /*
1170  *	Dynamic reset the DM910X board
1171  *	Stop DM910X board
1172  *	Free Tx/Rx allocated memory
1173  *	Reset DM910X board
 *	Re-initialize DM910X board
1175  */
1176 
static void dmfe_dynamic_reset(struct DEVICE *dev)
{
	struct dmfe_board_info *db = dev->priv;

	DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

	/* Stop MAC controller */
	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
	update_cr6(db->cr6_data, dev->base_addr);
	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
	/* Write CR5 back to itself -- presumably write-1-to-clear of all
	   pending status bits; confirm against the datasheet */
	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

	/* Disable upper layer interface */
	netif_stop_queue(dev);

	/* Free Rx Allocated buffer */
	dmfe_free_rxbuffer(db);

	/* system variable init: rings are empty, link considered down until
	   the timer routine senses it again */
	db->tx_packet_cnt = 0;
	db->tx_queue_cnt = 0;
	db->rx_avail_cnt = 0;
	db->link_failed = 1;
	db->wait_reset = 0;

	/* Re-initialize DM910X board */
	dmfe_init_dm910x(dev);

	/* Restart upper layer interface */
	netif_wake_queue(dev);
}
1208 
1209 
1210 /*
1211  *	free all allocated rx buffer
1212  */
1213 
dmfe_free_rxbuffer(struct dmfe_board_info * db)1214 static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1215 {
1216 	DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1217 
1218 	/* free allocated rx buffer */
1219 	while (db->rx_avail_cnt) {
1220 		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1221 		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1222 		db->rx_avail_cnt--;
1223 	}
1224 }
1225 
1226 
1227 /*
1228  *	Reuse the SK buffer
1229  */
1230 
static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
	struct rx_desc *rxptr = db->rx_insert_ptr;

	/* Re-arm this descriptor with the same skb, but only when the chip
	   does not already own it (RDES0 bit31 clear); otherwise the ring
	   bookkeeping is inconsistent and we only log it. */
	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
		rxptr->rx_skb_ptr = skb;
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		wmb();	/* buffer address must be visible before ownership flips */
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* hand back to NIC */
		db->rx_avail_cnt++;
		db->rx_insert_ptr = rxptr->next_rx_desc;
	} else
		DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}
1245 
1246 
1247 /*
1248  *	Initialize transmit/Receive descriptor
1249  *	Using Chain structure, and allocate Tx/Rx buffer
1250  */
1251 
static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
	struct tx_desc *tmp_tx;
	struct rx_desc *tmp_rx;
	unsigned char *tmp_buf;
	dma_addr_t tmp_tx_dma, tmp_rx_dma;
	dma_addr_t tmp_buf_dma;
	int i;

	DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

	/* tx descriptor start pointer */
	db->tx_insert_ptr = db->first_tx_desc;
	db->tx_remove_ptr = db->first_tx_desc;
	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

	/* rx descriptor start pointer: the Rx ring lives immediately after
	   the TX_DESC_CNT Tx descriptors in the same DMA allocation */
	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
	db->rx_insert_ptr = db->first_rx_desc;
	db->rx_ready_ptr = db->first_rx_desc;
	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */

	/* Init Transmit chain: each descriptor gets a fixed TX_BUF_ALLOC
	   slice of the pre-allocated buffer pool and chains (tdes3) to the
	   DMA address of the next descriptor */
	tmp_buf = db->buf_pool_start;
	tmp_buf_dma = db->buf_pool_dma_start;
	tmp_tx_dma = db->first_tx_desc_dma;
	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
		tmp_tx->tx_buf_ptr = tmp_buf;
		tmp_tx->tdes0 = cpu_to_le32(0);
		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
		tmp_tx_dma += sizeof(struct tx_desc);
		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);	/* chain to next */
		tmp_tx->next_tx_desc = tmp_tx + 1;
		tmp_buf = tmp_buf + TX_BUF_ALLOC;
		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
	}
	/* close the ring: last descriptor chains back to the first */
	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
	tmp_tx->next_tx_desc = db->first_tx_desc;

	 /* Init Receive descriptor chain (skbs are attached below) */
	tmp_rx_dma=db->first_rx_desc_dma;
	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
		tmp_rx->rdes0 = cpu_to_le32(0);
		tmp_rx->rdes1 = cpu_to_le32(0x01000600);	/* chained, 0x600 buffer size (see datasheet RDES1 layout) */
		tmp_rx_dma += sizeof(struct rx_desc);
		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);	/* chain to next */
		tmp_rx->next_rx_desc = tmp_rx + 1;
	}
	/* close the Rx ring as well */
	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
	tmp_rx->next_rx_desc = db->first_rx_desc;

	/* pre-allocate Rx buffer */
	allocate_rx_buffer(db);
}
1308 
1309 
1310 /*
1311  *	Update CR6 value
1312  *	Firstly stop DM910X , then written value and start
1313  */
1314 
static void update_cr6(u32 cr6_data, unsigned long ioaddr)
{
	u32 cr6_tmp;

	/* Write CR6 twice: first with the Tx/Rx start bits (0x2002) cleared
	   to stop the MAC, then the requested value; 5us settle after each. */
	cr6_tmp = cr6_data & ~0x2002;           /* stop Tx/Rx */
	outl(cr6_tmp, ioaddr + DCR6);
	udelay(5);
	outl(cr6_data, ioaddr + DCR6);
	udelay(5);
}
1325 
1326 
1327 /*
1328  *	Send a setup frame for DM9132
 *	This setup frame initializes the DM910X address filter mode
 */
1331 
static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
{
	struct dev_mc_list *mcptr;
	u16 * addrptr;
	unsigned long ioaddr = dev->base_addr+0xc0;		/* ID Table */
	u32 hash_val;
	u16 i, hash_table[4];

	DMFE_DBUG(0, "dm9132_id_table()", 0);

	/* Node address: three 16-bit words, each on a 4-byte register stride */
	addrptr = (u16 *) dev->dev_addr;
	outw(addrptr[0], ioaddr);
	ioaddr += 4;
	outw(addrptr[1], ioaddr);
	ioaddr += 4;
	outw(addrptr[2], ioaddr);
	ioaddr += 4;

	/* Clear Hash Table */
	for (i = 0; i < 4; i++)
		hash_table[i] = 0x0;

	/* broadcast address: always set hash bit 63 */
	hash_table[3] = 0x8000;

	/* the multicast address in Hash Table : 64 bits.
	   CRC of the 6-byte address, low 6 bits select the hash bit. */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
		hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
	}

	/* Write the hash table to MAC MD table */
	for (i = 0; i < 4; i++, ioaddr += 4)
		outw(hash_table[i], ioaddr);
}
1368 
1369 
1370 /*
1371  *	Send a setup frame for DM9102/DM9102A
 *	This setup frame initializes the DM910X address filter mode
1373  */
1374 
static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
	struct dmfe_board_info *db = dev->priv;
	struct dev_mc_list *mcptr;
	struct tx_desc *txptr;
	u16 * addrptr;
	u32 * suptr;
	int i;

	DMFE_DBUG(0, "send_filter_frame()", 0);

	/* Build the setup frame directly in the next Tx buffer */
	txptr = db->tx_insert_ptr;
	suptr = (u32 *) txptr->tx_buf_ptr;

	/* Node address: three 16-bit words, one per 32-bit setup-frame slot */
	addrptr = (u16 *) dev->dev_addr;
	*suptr++ = addrptr[0];
	*suptr++ = addrptr[1];
	*suptr++ = addrptr[2];

	/* broadcast address */
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;
	*suptr++ = 0xffff;

	/* fit the multicast address */
	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
		addrptr = (u16 *) mcptr->dmi_addr;
		*suptr++ = addrptr[0];
		*suptr++ = addrptr[1];
		*suptr++ = addrptr[2];
	}

	/* pad the remaining perfect-filter entries (16 total, 2 used above)
	   with the broadcast address */
	for (; i<14; i++) {
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
		*suptr++ = 0xffff;
	}

	/* prepare the setup frame */
	db->tx_insert_ptr = txptr->next_tx_desc;
	txptr->tdes1 = cpu_to_le32(0x890000c0);	/* setup packet, chained, 0xc0-byte length */

	/* Resource Check and Send the setup packet */
	if (!db->tx_packet_cnt) {
		/* Resource Empty: nothing in flight, launch it now */
		db->tx_packet_cnt++;
		txptr->tdes0 = cpu_to_le32(0x80000000);	/* give descriptor to NIC */
		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
		update_cr6(db->cr6_data, dev->base_addr);
		dev->trans_start = jiffies;
	} else
		db->tx_queue_cnt++;	/* Put in TX queue */
}
1430 
1431 
1432 /*
1433  *	Allocate rx buffer,
1434  *	As possible as allocate maxiumn Rx buffer
1435  */
1436 
static void allocate_rx_buffer(struct dmfe_board_info *db)
{
	struct rx_desc *rxptr;
	struct sk_buff *skb;

	rxptr = db->rx_insert_ptr;

	/* Attach an skb to every free descriptor until the ring is full or
	   dev_alloc_skb() fails (best effort) */
	while(db->rx_avail_cnt < RX_DESC_CNT) {
		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
			break;
		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
		wmb();	/* buffer address must be visible before ownership flips */
		rxptr->rdes0 = cpu_to_le32(0x80000000);	/* NIC owns it now */
		rxptr = rxptr->next_rx_desc;
		db->rx_avail_cnt++;
	}

	db->rx_insert_ptr = rxptr;
}
1457 
1458 
1459 /*
1460  *	Read one word data from the serial ROM
1461  */
1462 
static u16 read_srom_word(long ioaddr, int offset)
{
	int i;
	u16 srom_data = 0;
	long cr9_ioaddr = ioaddr + DCR9;

	/* Bit-banged serial EEPROM read via CR9: select and assert chip select */
	outl(CR9_SROM_READ, cr9_ioaddr);
	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Send the Read Command 110b */
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

	/* Send the 6-bit word offset, MSB first */
	for (i = 5; i >= 0; i--) {
		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
	}

	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

	/* Clock in 16 data bits, MSB first, sampling CR9_CRDOUT each cycle */
	for (i = 16; i > 0; i--) {
		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
		udelay(5);
		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
		udelay(5);
	}

	/* Deselect the SROM */
	outl(CR9_SROM_READ, cr9_ioaddr);
	return srom_data;
}
1496 
1497 
1498 /*
1499  *	Auto sense the media mode
1500  */
1501 
static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
	u8 ErrFlag = 0;		/* 1 = link down / unknown mode */
	u16 phy_mode;

	/* CR6 bit18=0, select 10/100M */
	update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

	/* Read PHY status register (reg 1) twice -- presumably because the
	   link bits are latched and the second read reflects current state;
	   standard MII practice, confirm against the PHY datasheet */
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);

	if ( (phy_mode & 0x24) == 0x24 ) {	/* bits 2 and 5 both set */
		/* Resolved mode is reported in a vendor register (7 or 17
		   depending on chip), one-hot in the top nibble */
		if (db->chip_id == PCI_DM9132_ID)	/* DM9132 */
			phy_mode = phy_read(db->ioaddr, db->phy_addr, 7, db->chip_id) & 0xf000;
		else 				/* DM9102/DM9102A */
			phy_mode = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0xf000;
		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
		switch (phy_mode) {
		case 0x1000: db->op_mode = DMFE_10MHF; break;
		case 0x2000: db->op_mode = DMFE_10MFD; break;
		case 0x4000: db->op_mode = DMFE_100MHF; break;
		case 0x8000: db->op_mode = DMFE_100MFD; break;
		default: db->op_mode = DMFE_10MHF;	/* unknown: safest fallback */
			ErrFlag = 1;
			break;
		}
	} else {
		db->op_mode = DMFE_10MHF;
		DMFE_DBUG(0, "Link Failed :", phy_mode);
		ErrFlag = 1;
	}

	return ErrFlag;
}
1536 
1537 
1538 /*
1539  *	Set 10/100 phyxcer capability
1540  *	AUTO mode : phyxcer register4 is NIC capability
1541  *	Force mode: phyxcer register4 is the force media
1542  */
1543 
static void dmfe_set_phyxcer(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Select 10/100M phyxcer (CR6 bit18=0) */
	db->cr6_data &= ~0x40000;
	update_cr6(db->cr6_data, db->ioaddr);

	/* DM9009 Chip: Phyxcer reg18 bit12=0 */
	if (db->chip_id == PCI_DM9009_ID) {
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 18, db->chip_id) & ~0x1000;
		phy_write(db->ioaddr, db->phy_addr, 18, phy_reg, db->chip_id);
	}

	/* Phyxcer capability setting: clear the advertisement bits (reg4
	   bits 5-8: 10HD/10FD/100HD/100FD) before setting them below */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;

	if (db->media_mode & DMFE_AUTO) {
		/* AUTO Mode: advertise everything the NIC supports */
		phy_reg |= db->PHY_reg4;
	} else {
		/* Force Mode: advertise only the forced media type */
		switch(db->media_mode) {
		case DMFE_10MHF: phy_reg |= 0x20; break;
		case DMFE_10MFD: phy_reg |= 0x40; break;
		case DMFE_100MHF: phy_reg |= 0x80; break;
		case DMFE_100MFD: phy_reg |= 0x100; break;
		}
		if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;	/* DM9009: 10M only */
	}

  	/* Write new capability to Phyxcer Reg4; if nothing would be
  	   advertised, fall back to full auto-negotiation */
	if ( !(phy_reg & 0x01e0)) {
		phy_reg|=db->PHY_reg4;
		db->media_mode|=DMFE_AUTO;
	}
	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);

 	/* Restart Auto-Negotiation (reg0 write differs by chip type) */
	if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
	if ( !db->chip_type )
		phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
}
1588 
1589 
1590 /*
1591  *	Process op-mode
1592  *	AUTO mode : PHY controller in Auto-negotiation Mode
1593  *	Force mode: PHY controller in force mode with HUB
1594  *			N-way force capability with SWITCH
1595  */
1596 
static void dmfe_process_mode(struct dmfe_board_info *db)
{
	u16 phy_reg;

	/* Full Duplex Mode Check */
	if (db->op_mode & 0x4)
		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
	else
		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */

	/* Transceiver Selection */
	if (db->op_mode & 0x10)		/* 1M HomePNA */
		db->cr6_data |= 0x40000;/* External MII select */
	else
		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */

	update_cr6(db->cr6_data, db->ioaddr);

	/* 10/100M phyxcer force mode need */
	if ( !(db->media_mode & 0x18)) {
		/* Force Mode */
		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
		if ( !(phy_reg & 0x1) ) {
			/* partner without N-Way capability: program speed and
			   duplex into the PHY control register directly */
			phy_reg = 0x0;
			switch(db->op_mode) {
			case DMFE_10MHF: phy_reg = 0x0; break;
			case DMFE_10MFD: phy_reg = 0x100; break;
			case DMFE_100MHF: phy_reg = 0x2000; break;
			case DMFE_100MFD: phy_reg = 0x2100; break;
			}
			/* write twice, with a delay on DM9102 external PHY --
			   presumably a chip quirk; verify against errata */
			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
       			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
				mdelay(20);
			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
		}
	}
}
1635 
1636 
1637 /*
1638  *	Write a word to Phy register
1639  */
1640 
static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
{
	u16 i;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132: PHY registers are memory-mapped at iobase+0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		outw(phy_data, ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-banged MII management frame via CR9 */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller (preamble) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send write command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send Phy address, 5 bits MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, 5 bits MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* turnaround bits (10) before the data word */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Write the 16-bit data word to PHY controller, MSB first */
		for ( i = 0x8000; i > 0; i >>= 1)
			phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
	}
}
1682 
1683 
1684 /*
1685  *	Read a word data from phy register
1686  */
1687 
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip: PHY registers are memory-mapped at iobase+0x80 */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip: bit-banged MII management frame via CR9 */
		ioaddr = iobase + DCR9;

		/* Send 33 synchronization clock to Phy controller (preamble) */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command(01) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command(10) to Phy */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send Phy address, 5 bits MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address, 5 bits MSB first */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip transition (turnaround) state */
		phy_read_1bit(ioaddr);

		/* read 16bit data, MSB first */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}
1734 
1735 
1736 /*
1737  *	Write one bit data to Phy Controller
1738  */
1739 
static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	/* Clock one bit out on the MII management interface:
	   present the data bit, pulse MDC high, return low. */
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
1749 
1750 
1751 /*
1752  *	Read one bit phy data from PHY controller
1753  */
1754 
static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	/* Clock one bit in from the PHY: 0x50000/0x40000 presumably toggle
	   MDC in read mode (confirm against CR9 bit definitions); the MDIO
	   input is sampled from bit 19. */
	outl(0x50000, ioaddr);
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
	outl(0x40000, ioaddr);
	udelay(1);

	return phy_data;
}
1767 
1768 
1769 /*
1770  *	Parser SROM and media mode
1771  */
1772 
dmfe_parse_srom(struct dmfe_board_info * db)1773 static void dmfe_parse_srom(struct dmfe_board_info * db)
1774 {
1775 	char * srom = db->srom;
1776 	int dmfe_mode, tmp_reg;
1777 
1778 	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1779 
1780 	/* Init CR15 */
1781 	db->cr15_data = CR15_DEFAULT;
1782 
1783 	/* Check SROM Version */
1784 	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1785 		/* SROM V4.01 */
1786 		/* Get NIC support media mode */
1787 		db->NIC_capability = le16_to_cpup(srom + 34);
1788 		db->PHY_reg4 = 0;
1789 		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1790 			switch( db->NIC_capability & tmp_reg ) {
1791 			case 0x1: db->PHY_reg4 |= 0x0020; break;
1792 			case 0x2: db->PHY_reg4 |= 0x0040; break;
1793 			case 0x4: db->PHY_reg4 |= 0x0080; break;
1794 			case 0x8: db->PHY_reg4 |= 0x0100; break;
1795 			}
1796 		}
1797 
1798 		/* Media Mode Force or not check */
1799 		dmfe_mode = le32_to_cpup(srom + 34) & le32_to_cpup(srom + 36);
1800 		switch(dmfe_mode) {
1801 		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
1802 		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
1803 		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
1804 		case 0x100:
1805 		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1806 		}
1807 
1808 		/* Special Function setting */
1809 		/* VLAN function */
1810 		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1811 			db->cr15_data |= 0x40;
1812 
1813 		/* Flow Control */
1814 		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1815 			db->cr15_data |= 0x400;
1816 
1817 		/* TX pause packet */
1818 		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1819 			db->cr15_data |= 0x9800;
1820 	}
1821 
1822 	/* Parse HPNA parameter */
1823 	db->HPNA_command = 1;
1824 
1825 	/* Accept remote command or not */
1826 	if (HPNA_rx_cmd == 0)
1827 		db->HPNA_command |= 0x8000;
1828 
1829 	 /* Issue remote command & operation mode */
1830 	if (HPNA_tx_cmd == 1)
1831 		switch(HPNA_mode) {	/* Issue Remote Command */
1832 		case 0: db->HPNA_command |= 0x0904; break;
1833 		case 1: db->HPNA_command |= 0x0a00; break;
1834 		case 2: db->HPNA_command |= 0x0506; break;
1835 		case 3: db->HPNA_command |= 0x0602; break;
1836 		}
1837 	else
1838 		switch(HPNA_mode) {	/* Don't Issue */
1839 		case 0: db->HPNA_command |= 0x0004; break;
1840 		case 1: db->HPNA_command |= 0x0000; break;
1841 		case 2: db->HPNA_command |= 0x0006; break;
1842 		case 3: db->HPNA_command |= 0x0002; break;
1843 		}
1844 
1845 	/* Check DM9801 or DM9802 present or not */
1846 	db->HPNA_present = 0;
1847 	update_cr6(db->cr6_data|0x40000, db->ioaddr);
1848 	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1849 	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1850 		/* DM9801 or DM9802 present */
1851 		db->HPNA_timer = 8;
1852 		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1853 			/* DM9801 HomeRun */
1854 			db->HPNA_present = 1;
1855 			dmfe_program_DM9801(db, tmp_reg);
1856 		} else {
1857 			/* DM9802 LongRun */
1858 			db->HPNA_present = 2;
1859 			dmfe_program_DM9802(db);
1860 		}
1861 	}
1862 
1863 }
1864 
1865 
1866 /*
1867  *	Init HomeRun DM9801
1868  */
1869 
static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	/* Per-revision noise-floor tuning; reg17/reg25 hold the adjusted
	   values written back at the end. */
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		/* Note: E3 reads reg 24 (not 25) for the noise value and
		   leaves reg17 unadjusted -- presumably an E3 quirk */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	/* Commit command and tuned values to the companion PHY */
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
1902 
1903 
1904 /*
1905  *	Init HomeRun DM9802
1906  */
1907 
static void dmfe_program_DM9802(struct dmfe_board_info * db)
{
	uint phy_reg;

	/* Program the DM9802 LongRun companion: issue the HPNA command via
	   reg16, then fold the configured noise floor into reg25's low byte. */
	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}
1918 
1919 
1920 /*
1921  *	Check remote HPNA power and speed status. If not correct,
1922  *	issue command again.
1923 */
1924 
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
{
	uint phy_reg;

	/* Get remote device status from PHY reg17 bits 5-6 and translate it
	   into the matching command encoding (power/speed) */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch(phy_reg) {
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
	}

	/* Check whether the remote device status matches our setting or not;
	   if not, re-issue the command and re-check after 8 timer ticks */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
		db->HPNA_timer=8;
	} else
		db->HPNA_timer=600;	/* Match, every 10 minutes, check */
}
1945 
1946 
1947 
/* PCI vendor/device IDs this driver binds to; the driver_data field
 * carries the chip-type tag (PCI_DM9xxx_ID) consumed by the probe
 * routine.  The 0x10B9:0x5261 entry is handled as DM9102-compatible. */
static struct pci_device_id dmfe_pci_tbl[] __devinitdata = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0, }
};
/* Export the ID table so hot-plug tooling can match this module */
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
1957 
1958 
1959 static struct pci_driver dmfe_driver = {
1960 	name:		"dmfe",
1961 	id_table:	dmfe_pci_tbl,
1962 	probe:		dmfe_init_one,
1963 	remove:		__devexit_p(dmfe_remove_one),
1964 };
1965 
MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");

/* Integer module load-time parameters; see dmfe_init_module() for how
 * each value is range-checked and applied. */
MODULE_PARM(debug, "i");
MODULE_PARM(mode, "i");
MODULE_PARM(cr6set, "i");
MODULE_PARM(chkmode, "i");
MODULE_PARM(HPNA_mode, "i");
MODULE_PARM(HPNA_rx_cmd, "i");
MODULE_PARM(HPNA_tx_cmd, "i");
MODULE_PARM(HPNA_NoiseFloor, "i");
MODULE_PARM(SF_mode, "i");
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
1982 
/*	Description:
 *	When the user loads the module with insmod, the kernel invokes
 *	dmfe_init_module() to initialize and register the driver.
 */
1987 
dmfe_init_module(void)1988 static int __init dmfe_init_module(void)
1989 {
1990 	int rc;
1991 
1992 	printk(version);
1993 	printed_version = 1;
1994 
1995 	DMFE_DBUG(0, "init_module() ", debug);
1996 
1997 	if (debug)
1998 		dmfe_debug = debug;	/* set debug flag */
1999 	if (cr6set)
2000 		dmfe_cr6_user_set = cr6set;
2001 
2002  	switch(mode) {
2003    	case DMFE_10MHF:
2004 	case DMFE_100MHF:
2005 	case DMFE_10MFD:
2006 	case DMFE_100MFD:
2007 	case DMFE_1M_HPNA:
2008 		dmfe_media_mode = mode;
2009 		break;
2010 	default:dmfe_media_mode = DMFE_AUTO;
2011 		break;
2012 	}
2013 
2014 	if (HPNA_mode > 4)
2015 		HPNA_mode = 0;		/* Default: LP/HS */
2016 	if (HPNA_rx_cmd > 1)
2017 		HPNA_rx_cmd = 0;	/* Default: Ignored remote cmd */
2018 	if (HPNA_tx_cmd > 1)
2019 		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
2020 	if (HPNA_NoiseFloor > 15)
2021 		HPNA_NoiseFloor = 0;
2022 
2023 	rc = pci_module_init(&dmfe_driver);
2024 	if (rc < 0)
2025 		return rc;
2026 
2027 	return 0;
2028 }
2029 
2030 
2031 /*
2032  *	Description:
2033  *	when user used rmmod to delete module, system invoked clean_module()
2034  *	to un-register all registered services.
2035  */
2036 
dmfe_cleanup_module(void)2037 static void __exit dmfe_cleanup_module(void)
2038 {
2039 	DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2040 	pci_unregister_driver(&dmfe_driver);
2041 }
2042 
2043 module_init(dmfe_init_module);
2044 module_exit(dmfe_cleanup_module);
2045