1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION    "1.1.15-NAPI" /* Keep the -NAPI suffix at least for testing */
17 #else
18 #define DRV_VERSION	"1.1.15"
19 #endif
20 #define DRV_RELDATE	"Feb 27, 2007"
21 
22 
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <asm/uaccess.h>
35 
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39 
40 static char version[] __devinitdata =
41 	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42 
43 /* A few user-configurable values. */
44 
45 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46 static unsigned int max_interrupt_work = 25;
47 
48 #define MAX_UNITS 8
49 /* Used to pass the full-duplex flag, etc. */
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
53 
54 /*  The possible media types that can be set in options[] are: */
55 const char * const medianame[32] = {
56 	"10baseT", "10base2", "AUI", "100baseTx",
57 	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 	"","","","", "","","","",  "","","","Transceiver reset",
62 };
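/* Illustrative usage (not taken from this file): options[] and full_duplex[]
 * take one comma-separated value per board, and an options[] media value
 * indexes medianame[] above.  For example, a hypothetical invocation such as
 *
 *	modprobe tulip options=11,0 full_duplex=1
 *
 * would force the first board to MII mode ("MII" is index 11) at full duplex
 * and leave the second board on autoselect.
 */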
63 
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 	defined(CONFIG_SPARC) || defined(__ia64__) || \
67 	defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
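/* Sketch of the copybreak idea (illustration only; the real logic lives in
 * the Rx path in interrupt.c): packets shorter than rx_copybreak are copied
 * into a freshly allocated small skb so the full-size, DMA-mapped Rx buffer
 * can be reused, while larger packets are handed up in the original buffer:
 *
 *	if (pkt_len < rx_copybreak &&
 *	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 *		skb_reserve(skb, 2);	// IP-align the small copy only
 *		// copy the DMA buffer contents, recycle the original buffer
 *	} else {
 *		// pass the original Rx buffer to the stack, map a new one
 *	}
 */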
72 
73 /*
74   Set the bus performance register.
75 	Typical: Set 16 longword cache alignment, no burst limit.
76 	Cache alignment (bits 15:14)		Burst length (bits 13:8)
77 		0000  no alignment		0000  unlimited		0800  8 longwords
78 		4000  8 longwords		0100  1 longword	1000  16 longwords
79 		8000  16 longwords		0200  2 longwords	2000  32 longwords
80 		C000  32 longwords		0400  4 longwords
81 	Warning: many older 486 systems are broken and require setting 0x00A04800
82 	   8 longword cache alignment, 8 longword burst.
83 	ToDo: Non-Intel setting could be better.
84 */
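/* Purely illustrative helper (hypothetical, not used by the driver): shows
 * how the cache-alignment and burst-length codes from the table above
 * combine into the low half of a CSR0 value.  For instance cal=2 (16
 * longwords) with burst=0 (unlimited) yields 0x8000, the x86 default below.
 */
static inline u32 tulip_example_busmode(unsigned int cal, unsigned int burst)
{
	return (cal << 14) | (burst << 8);	/* bits 15:14 and 13:8 */
}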
85 
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91 /* The UltraSparc PCI controllers will disconnect at every 64-byte
92  * crossing anyway, so it makes no sense to tell Tulip to burst
93  * any more than that.
94  */
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 #warning Processor architecture undefined!
102 static int csr0 = 0x00A00000 | 0x4800;
103 #endif
104 
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT  (4*HZ)
108 
109 
110 MODULE_AUTHOR("The Linux Kernel Team");
111 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(DRV_VERSION);
114 module_param(tulip_debug, int, 0);
115 module_param(max_interrupt_work, int, 0);
116 module_param(rx_copybreak, int, 0);
117 module_param(csr0, int, 0);
118 module_param_array(options, int, NULL, 0);
119 module_param_array(full_duplex, int, NULL, 0);
120 
121 #ifdef TULIP_DEBUG
122 int tulip_debug = TULIP_DEBUG;
123 #else
124 int tulip_debug = 1;
125 #endif
126 
127 static void tulip_timer(unsigned long data)
128 {
129 	struct net_device *dev = (struct net_device *)data;
130 	struct tulip_private *tp = netdev_priv(dev);
131 
132 	if (netif_running(dev))
133 		schedule_work(&tp->media_work);
134 }
135 
136 /*
137  * This table is used during operation for capabilities and the media timer.
138  *
139  * It is indexed via the values in 'enum chips'
140  */
141 
142 struct tulip_chip_table tulip_tbl[] = {
143   { }, /* placeholder for array, slot unused currently */
144   { }, /* placeholder for array, slot unused currently */
145 
146   /* DC21140 */
147   { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
149 	tulip_media_task },
150 
151   /* DC21142, DC21143 */
152   { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
153 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
154 	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
155 
156   /* LC82C168 */
157   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
158 	HAS_MII | HAS_PNICNWAY, pnic_timer, },
159 
160   /* MX98713 */
161   { "Macronix 98713 PMAC", 128, 0x0001ebef,
162 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
163 
164   /* MX98715 */
165   { "Macronix 98715 PMAC", 256, 0x0001ebef,
166 	HAS_MEDIA_TABLE, mxic_timer, },
167 
168   /* MX98725 */
169   { "Macronix 98725 PMAC", 256, 0x0001ebef,
170 	HAS_MEDIA_TABLE, mxic_timer, },
171 
172   /* AX88140 */
173   { "ASIX AX88140", 128, 0x0001fbff,
174 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
175 	| IS_ASIX, tulip_timer, tulip_media_task },
176 
177   /* PNIC2 */
178   { "Lite-On PNIC-II", 256, 0x0801fbff,
179 	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
180 
181   /* COMET */
182   { "ADMtek Comet", 256, 0x0001abef,
183 	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
184 
185   /* COMPEX9881 */
186   { "Compex 9881 PMAC", 128, 0x0001ebef,
187 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
188 
189   /* I21145 */
190   { "Intel DS21145 Tulip", 128, 0x0801fbff,
191 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
192 	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
193 
194   /* DM910X */
195 #ifdef CONFIG_TULIP_DM910X
196   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
197 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
198 	tulip_timer, tulip_media_task },
199 #else
200   { NULL },
201 #endif
202 
203   /* RS7112 */
204   { "Conexant LANfinity", 256, 0x0001ebef,
205 	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
206 
207 };
208 
209 
210 static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
211 	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
212 	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
213 	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
214 	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
215 	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
216 /*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
217 	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
218 	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
219 	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
231 #ifdef CONFIG_TULIP_DM910X
232 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
234 #endif
235 	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
237 	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
242 	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
247 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
248 	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
249 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 	{ } /* terminate list */
251 };
252 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253 
254 
255 /* A full-duplex map for media types. */
256 const char tulip_media_cap[32] =
257 {0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
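/* Each entry above is a bitmask of the Media* capability flags from tulip.h
 * (MediaIsFD, MediaAlwaysFD, MediaIsMII, MediaIsFx, MediaIs100).  For
 * example, index 5 ("100baseTx-FDX") is 19 = MediaIs100 | MediaAlwaysFD |
 * MediaIsFD, and index 11 ("MII") is 5 = MediaIsMII | MediaIsFD.
 */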
258 
259 static void tulip_tx_timeout(struct net_device *dev);
260 static void tulip_init_ring(struct net_device *dev);
261 static void tulip_free_ring(struct net_device *dev);
262 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
263 					  struct net_device *dev);
264 static int tulip_open(struct net_device *dev);
265 static int tulip_close(struct net_device *dev);
266 static void tulip_up(struct net_device *dev);
267 static void tulip_down(struct net_device *dev);
268 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
269 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
270 static void set_rx_mode(struct net_device *dev);
271 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
272 #ifdef CONFIG_NET_POLL_CONTROLLER
273 static void poll_tulip(struct net_device *dev);
274 #endif
275 
276 static void tulip_set_power_state (struct tulip_private *tp,
277 				   int sleep, int snooze)
278 {
279 	if (tp->flags & HAS_ACPI) {
280 		u32 tmp, newtmp;
281 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 		if (sleep)
284 			newtmp |= CFDD_Sleep;
285 		else if (snooze)
286 			newtmp |= CFDD_Snooze;
287 		if (tmp != newtmp)
288 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 	}
290 
291 }
292 
293 
294 static void tulip_up(struct net_device *dev)
295 {
296 	struct tulip_private *tp = netdev_priv(dev);
297 	void __iomem *ioaddr = tp->base_addr;
298 	int next_tick = 3*HZ;
299 	u32 reg;
300 	int i;
301 
302 #ifdef CONFIG_TULIP_NAPI
303 	napi_enable(&tp->napi);
304 #endif
305 
306 	/* Wake the chip from sleep/snooze mode. */
307 	tulip_set_power_state (tp, 0, 0);
308 
309 	/* Disable all WOL events */
310 	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 	tulip_set_wolopts(tp->pdev, 0);
313 
314 	/* On some chip revs we must set the MII/SYM port before the reset!? */
315 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
316 		iowrite32(0x00040000, ioaddr + CSR6);
317 
318 	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
319 	iowrite32(0x00000001, ioaddr + CSR0);
320 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
321 	udelay(100);
322 
323 	/* Deassert reset.
324 	   Wait the specified 50 PCI cycles after a reset by initializing
325 	   Tx and Rx queues and the address filter list. */
326 	iowrite32(tp->csr0, ioaddr + CSR0);
327 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
328 	udelay(100);
329 
330 	if (tulip_debug > 1)
331 		netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
332 
333 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 	tp->cur_rx = tp->cur_tx = 0;
336 	tp->dirty_rx = tp->dirty_tx = 0;
337 
338 	if (tp->flags & MC_HASH_ONLY) {
339 		u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 		if (tp->chip_id == AX88140) {
342 			iowrite32(0, ioaddr + CSR13);
343 			iowrite32(addr_low,  ioaddr + CSR14);
344 			iowrite32(1, ioaddr + CSR13);
345 			iowrite32(addr_high, ioaddr + CSR14);
346 		} else if (tp->flags & COMET_MAC_ADDR) {
347 			iowrite32(addr_low,  ioaddr + 0xA4);
348 			iowrite32(addr_high, ioaddr + 0xA8);
349 			iowrite32(0, ioaddr + CSR27);
350 			iowrite32(0, ioaddr + CSR28);
351 		}
352 	} else {
353 		/* This is set_rx_mode(), but without starting the transmitter. */
354 		u16 *eaddrs = (u16 *)dev->dev_addr;
355 		u16 *setup_frm = &tp->setup_frame[15*6];
356 		dma_addr_t mapping;
357 
358 		/* 21140 bug: you must add the broadcast address. */
359 		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360 		/* Fill the final entry of the table with our physical address. */
361 		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
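		/* Layout note: the 192-byte setup frame holds 16 perfect-filter
		 * entries of 12 bytes each, so &tp->setup_frame[15*6] is the 16th
		 * and last entry.  Each 16-bit piece of the address is written
		 * twice because only the low half of each 32-bit longword is
		 * consumed by the chip (see the note in set_rx_mode() below). */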
364 
365 		mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 					 sizeof(tp->setup_frame),
367 					 PCI_DMA_TODEVICE);
368 		tp->tx_buffers[tp->cur_tx].skb = NULL;
369 		tp->tx_buffers[tp->cur_tx].mapping = mapping;
370 
371 		/* Put the setup frame on the Tx list. */
372 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375 
376 		tp->cur_tx++;
377 	}
378 
379 	tp->saved_if_port = dev->if_port;
380 	if (dev->if_port == 0)
381 		dev->if_port = tp->default_port;
382 
383 	/* Allow selecting a default media. */
384 	i = 0;
385 	if (tp->mtable == NULL)
386 		goto media_picked;
387 	if (dev->if_port) {
388 		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 			(dev->if_port == 12 ? 0 : dev->if_port);
390 		for (i = 0; i < tp->mtable->leafcount; i++)
391 			if (tp->mtable->mleaf[i].media == looking_for) {
392 				dev_info(&dev->dev,
393 					 "Using user-specified media %s\n",
394 					 medianame[dev->if_port]);
395 				goto media_picked;
396 			}
397 	}
398 	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 		for (i = 0; i < tp->mtable->leafcount; i++)
401 			if (tp->mtable->mleaf[i].media == looking_for) {
402 				dev_info(&dev->dev,
403 					 "Using EEPROM-set media %s\n",
404 					 medianame[looking_for]);
405 				goto media_picked;
406 			}
407 	}
408 	/* Start sensing first non-full-duplex media. */
409 	for (i = tp->mtable->leafcount - 1;
410 		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 		;
412 media_picked:
413 
414 	tp->csr6 = 0;
415 	tp->cur_index = i;
416 	tp->nwayset = 0;
417 
418 	if (dev->if_port) {
419 		if (tp->chip_id == DC21143  &&
420 		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421 			/* We must reset the media CSRs when we force-select MII mode. */
422 			iowrite32(0x0000, ioaddr + CSR13);
423 			iowrite32(0x0000, ioaddr + CSR14);
424 			iowrite32(0x0008, ioaddr + CSR15);
425 		}
426 		tulip_select_media(dev, 1);
427 	} else if (tp->chip_id == DC21142) {
428 		if (tp->mii_cnt) {
429 			tulip_select_media(dev, 1);
430 			if (tulip_debug > 1)
431 				dev_info(&dev->dev,
432 					 "Using MII transceiver %d, status %04x\n",
433 					 tp->phys[0],
434 					 tulip_mdio_read(dev, tp->phys[0], 1));
435 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 			tp->csr6 = csr6_mask_hdcap;
437 			dev->if_port = 11;
438 			iowrite32(0x0000, ioaddr + CSR13);
439 			iowrite32(0x0000, ioaddr + CSR14);
440 		} else
441 			t21142_start_nway(dev);
442 	} else if (tp->chip_id == PNIC2) {
443 	        /* for initial startup advertise 10/100 Full and Half */
444 	        tp->sym_advertise = 0x01E0;
445                 /* enable autonegotiate end interrupt */
446 	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
447 	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
448 		pnic2_start_nway(dev);
449 	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
450 		if (tp->mii_cnt) {
451 			dev->if_port = 11;
452 			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 			iowrite32(0x0001, ioaddr + CSR15);
454 		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 			pnic_do_nway(dev);
456 		else {
457 			/* Start with 10mbps to do autonegotiation. */
458 			iowrite32(0x32, ioaddr + CSR12);
459 			tp->csr6 = 0x00420000;
460 			iowrite32(0x0001B078, ioaddr + 0xB8);
461 			iowrite32(0x0201B078, ioaddr + 0xB8);
462 			next_tick = 1*HZ;
463 		}
464 	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 		   ! tp->medialock) {
466 		dev->if_port = 0;
467 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470 		/* Provided by BOLO, Macronix - 12/10/1998. */
471 		dev->if_port = 0;
472 		tp->csr6 = 0x01a80200;
473 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476 		/* Enable automatic Tx underrun recovery. */
477 		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 		dev->if_port = tp->mii_cnt ? 11 : 0;
479 		tp->csr6 = 0x00040000;
480 	} else if (tp->chip_id == AX88140) {
481 		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 	} else
483 		tulip_select_media(dev, 1);
484 
485 	/* Start the chip's Tx to process setup frame. */
486 	tulip_stop_rxtx(tp);
487 	barrier();
488 	udelay(5);
489 	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
490 
491 	/* Enable interrupts by setting the interrupt mask. */
492 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 	tulip_start_rxtx(tp);
495 	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
496 
497 	if (tulip_debug > 2) {
498 		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 			   ioread32(ioaddr + CSR0),
500 			   ioread32(ioaddr + CSR5),
501 			   ioread32(ioaddr + CSR6));
502 	}
503 
504 	/* Set the timer to check for link beat and perhaps switch
505 	   to an alternate media type. */
506 	tp->timer.expires = RUN_AT(next_tick);
507 	add_timer(&tp->timer);
508 #ifdef CONFIG_TULIP_NAPI
509 	init_timer(&tp->oom_timer);
510         tp->oom_timer.data = (unsigned long)dev;
511         tp->oom_timer.function = oom_timer;
512 #endif
513 }
514 
515 static int
516 tulip_open(struct net_device *dev)
517 {
518 	int retval;
519 
520 	tulip_init_ring (dev);
521 
522 	retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
523 	if (retval)
524 		goto free_ring;
525 
526 	tulip_up (dev);
527 
528 	netif_start_queue (dev);
529 
530 	return 0;
531 
532 free_ring:
533 	tulip_free_ring (dev);
534 	return retval;
535 }
536 
537 
538 static void tulip_tx_timeout(struct net_device *dev)
539 {
540 	struct tulip_private *tp = netdev_priv(dev);
541 	void __iomem *ioaddr = tp->base_addr;
542 	unsigned long flags;
543 
544 	spin_lock_irqsave (&tp->lock, flags);
545 
546 	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
547 		/* Do nothing -- the media monitor should handle this. */
548 		if (tulip_debug > 1)
549 			dev_warn(&dev->dev,
550 				 "Transmit timeout using MII device\n");
551 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
552 		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
553 		   tp->chip_id == DM910X) {
554 		dev_warn(&dev->dev,
555 			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
556 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
557 			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
558 			 ioread32(ioaddr + CSR15));
559 		tp->timeout_recovery = 1;
560 		schedule_work(&tp->media_work);
561 		goto out_unlock;
562 	} else if (tp->chip_id == PNIC2) {
563 		dev_warn(&dev->dev,
564 			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
565 			 (int)ioread32(ioaddr + CSR5),
566 			 (int)ioread32(ioaddr + CSR6),
567 			 (int)ioread32(ioaddr + CSR7),
568 			 (int)ioread32(ioaddr + CSR12));
569 	} else {
570 		dev_warn(&dev->dev,
571 			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
572 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
573 		dev->if_port = 0;
574 	}
575 
576 #if defined(way_too_many_messages)
577 	if (tulip_debug > 3) {
578 		int i;
579 		for (i = 0; i < RX_RING_SIZE; i++) {
580 			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
581 			int j;
582 			printk(KERN_DEBUG
583 			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
584 			       i,
585 			       (unsigned int)tp->rx_ring[i].status,
586 			       (unsigned int)tp->rx_ring[i].length,
587 			       (unsigned int)tp->rx_ring[i].buffer1,
588 			       (unsigned int)tp->rx_ring[i].buffer2,
589 			       buf[0], buf[1], buf[2]);
590 			for (j = 0; buf[j] != 0xee && j < 1600; j++)
591 				if (j < 100)
592 					pr_cont(" %02x", buf[j]);
593 			pr_cont(" j=%d\n", j);
594 		}
595 		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
596 		for (i = 0; i < RX_RING_SIZE; i++)
597 			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
598 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
599 		for (i = 0; i < TX_RING_SIZE; i++)
600 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
601 		pr_cont("\n");
602 	}
603 #endif
604 
605 	tulip_tx_timeout_complete(tp, ioaddr);
606 
607 out_unlock:
608 	spin_unlock_irqrestore (&tp->lock, flags);
609 	dev->trans_start = jiffies; /* prevent tx timeout */
610 	netif_wake_queue (dev);
611 }
612 
613 
614 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
615 static void tulip_init_ring(struct net_device *dev)
616 {
617 	struct tulip_private *tp = netdev_priv(dev);
618 	int i;
619 
620 	tp->susp_rx = 0;
621 	tp->ttimer = 0;
622 	tp->nir = 0;
623 
624 	for (i = 0; i < RX_RING_SIZE; i++) {
625 		tp->rx_ring[i].status = 0x00000000;
626 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
627 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
628 		tp->rx_buffers[i].skb = NULL;
629 		tp->rx_buffers[i].mapping = 0;
630 	}
631 	/* Mark the last entry as wrapping the ring. */
632 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
633 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
634 
635 	for (i = 0; i < RX_RING_SIZE; i++) {
636 		dma_addr_t mapping;
637 
638 		/* Note the receive buffer must be longword aligned.
639 		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
640 		   use skb_reserve() to align the IP header! */
641 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
642 		tp->rx_buffers[i].skb = skb;
643 		if (skb == NULL)
644 			break;
645 		mapping = pci_map_single(tp->pdev, skb->data,
646 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
647 		tp->rx_buffers[i].mapping = mapping;
648 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
649 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
650 	}
651 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
652 
653 	/* The Tx buffer descriptor is filled in as needed, but we
654 	   do need to clear the ownership bit. */
655 	for (i = 0; i < TX_RING_SIZE; i++) {
656 		tp->tx_buffers[i].skb = NULL;
657 		tp->tx_buffers[i].mapping = 0;
658 		tp->tx_ring[i].status = 0x00000000;
659 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
660 	}
661 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
662 }
663 
664 static netdev_tx_t
665 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
666 {
667 	struct tulip_private *tp = netdev_priv(dev);
668 	int entry;
669 	u32 flag;
670 	dma_addr_t mapping;
671 	unsigned long flags;
672 
673 	spin_lock_irqsave(&tp->lock, flags);
674 
675 	/* Calculate the next Tx descriptor entry. */
676 	entry = tp->cur_tx % TX_RING_SIZE;
677 
678 	tp->tx_buffers[entry].skb = skb;
679 	mapping = pci_map_single(tp->pdev, skb->data,
680 				 skb->len, PCI_DMA_TODEVICE);
681 	tp->tx_buffers[entry].mapping = mapping;
682 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
683 
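	/* Descriptor flag values used below (per the 21x4x Tx descriptor
	 * format): 0x60000000 marks a first+last (single-buffer) segment,
	 * 0xe0000000 additionally requests a Tx-done interrupt, and
	 * DESC_RING_WRAP marks the end of the descriptor ring.  Interrupts
	 * are only requested on some packets to limit interrupt load. */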
684 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
685 		flag = 0x60000000; /* No interrupt */
686 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
687 		flag = 0xe0000000; /* Tx-done intr. */
688 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
689 		flag = 0x60000000; /* No Tx-done intr. */
690 	} else {		/* Leave room for set_rx_mode() to fill entries. */
691 		flag = 0xe0000000; /* Tx-done intr. */
692 		netif_stop_queue(dev);
693 	}
694 	if (entry == TX_RING_SIZE-1)
695 		flag = 0xe0000000 | DESC_RING_WRAP;
696 
697 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
698 	/* if we were using Transmit Automatic Polling, we would need a
699 	 * wmb() here. */
700 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
701 	wmb();
702 
703 	tp->cur_tx++;
704 
705 	/* Trigger an immediate transmit demand. */
706 	iowrite32(0, tp->base_addr + CSR1);
707 
708 	spin_unlock_irqrestore(&tp->lock, flags);
709 
710 	return NETDEV_TX_OK;
711 }
712 
713 static void tulip_clean_tx_ring(struct tulip_private *tp)
714 {
715 	unsigned int dirty_tx;
716 
717 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
718 		dirty_tx++) {
719 		int entry = dirty_tx % TX_RING_SIZE;
720 		int status = le32_to_cpu(tp->tx_ring[entry].status);
721 
722 		if (status < 0) {
723 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
724 			tp->tx_ring[entry].status = 0;
725 		}
726 
727 		/* Check for Tx filter setup frames. */
728 		if (tp->tx_buffers[entry].skb == NULL) {
729 			/* test because dummy frames are not mapped */
730 			if (tp->tx_buffers[entry].mapping)
731 				pci_unmap_single(tp->pdev,
732 					tp->tx_buffers[entry].mapping,
733 					sizeof(tp->setup_frame),
734 					PCI_DMA_TODEVICE);
735 			continue;
736 		}
737 
738 		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
739 				tp->tx_buffers[entry].skb->len,
740 				PCI_DMA_TODEVICE);
741 
742 		/* Free the original skb. */
743 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
744 		tp->tx_buffers[entry].skb = NULL;
745 		tp->tx_buffers[entry].mapping = 0;
746 	}
747 }
748 
749 static void tulip_down (struct net_device *dev)
750 {
751 	struct tulip_private *tp = netdev_priv(dev);
752 	void __iomem *ioaddr = tp->base_addr;
753 	unsigned long flags;
754 
755 	cancel_work_sync(&tp->media_work);
756 
757 #ifdef CONFIG_TULIP_NAPI
758 	napi_disable(&tp->napi);
759 #endif
760 
761 	del_timer_sync (&tp->timer);
762 #ifdef CONFIG_TULIP_NAPI
763 	del_timer_sync (&tp->oom_timer);
764 #endif
765 	spin_lock_irqsave (&tp->lock, flags);
766 
767 	/* Disable interrupts by clearing the interrupt mask. */
768 	iowrite32 (0x00000000, ioaddr + CSR7);
769 
770 	/* Stop the Tx and Rx processes. */
771 	tulip_stop_rxtx(tp);
772 
773 	/* prepare receive buffers */
774 	tulip_refill_rx(dev);
775 
776 	/* release any unconsumed transmit buffers */
777 	tulip_clean_tx_ring(tp);
778 
779 	if (ioread32(ioaddr + CSR6) != 0xffffffff)
780 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
781 
782 	spin_unlock_irqrestore (&tp->lock, flags);
783 
784 	init_timer(&tp->timer);
785 	tp->timer.data = (unsigned long)dev;
786 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
787 
788 	dev->if_port = tp->saved_if_port;
789 
790 	/* Leave the driver in snooze, not sleep, mode. */
791 	tulip_set_power_state (tp, 0, 1);
792 }
793 
794 static void tulip_free_ring (struct net_device *dev)
795 {
796 	struct tulip_private *tp = netdev_priv(dev);
797 	int i;
798 
799 	/* Free all the skbuffs in the Rx queue. */
800 	for (i = 0; i < RX_RING_SIZE; i++) {
801 		struct sk_buff *skb = tp->rx_buffers[i].skb;
802 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
803 
804 		tp->rx_buffers[i].skb = NULL;
805 		tp->rx_buffers[i].mapping = 0;
806 
807 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
808 		tp->rx_ring[i].length = 0;
809 		/* An invalid address. */
810 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
811 		if (skb) {
812 			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
813 					 PCI_DMA_FROMDEVICE);
814 			dev_kfree_skb (skb);
815 		}
816 	}
817 
818 	for (i = 0; i < TX_RING_SIZE; i++) {
819 		struct sk_buff *skb = tp->tx_buffers[i].skb;
820 
821 		if (skb != NULL) {
822 			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
823 					 skb->len, PCI_DMA_TODEVICE);
824 			dev_kfree_skb (skb);
825 		}
826 		tp->tx_buffers[i].skb = NULL;
827 		tp->tx_buffers[i].mapping = 0;
828 	}
829 }
830 
831 static int tulip_close (struct net_device *dev)
832 {
833 	struct tulip_private *tp = netdev_priv(dev);
834 	void __iomem *ioaddr = tp->base_addr;
835 
836 	netif_stop_queue (dev);
837 
838 	tulip_down (dev);
839 
840 	if (tulip_debug > 1)
841 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
842 			   ioread32 (ioaddr + CSR5));
843 
844 	free_irq (dev->irq, dev);
845 
846 	tulip_free_ring (dev);
847 
848 	return 0;
849 }
850 
851 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
852 {
853 	struct tulip_private *tp = netdev_priv(dev);
854 	void __iomem *ioaddr = tp->base_addr;
855 
856 	if (netif_running(dev)) {
857 		unsigned long flags;
858 
859 		spin_lock_irqsave (&tp->lock, flags);
860 
861 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
862 
863 		spin_unlock_irqrestore(&tp->lock, flags);
864 	}
865 
866 	return &dev->stats;
867 }
868 
869 
870 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
871 {
872 	struct tulip_private *np = netdev_priv(dev);
873 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
874 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
875 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
876 }
877 
878 
879 static int tulip_ethtool_set_wol(struct net_device *dev,
880 				 struct ethtool_wolinfo *wolinfo)
881 {
882 	struct tulip_private *tp = netdev_priv(dev);
883 
884 	if (wolinfo->wolopts & (~tp->wolinfo.supported))
885 		return -EOPNOTSUPP;
886 
887 	tp->wolinfo.wolopts = wolinfo->wolopts;
888 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
889 	return 0;
890 }
891 
892 static void tulip_ethtool_get_wol(struct net_device *dev,
893 				  struct ethtool_wolinfo *wolinfo)
894 {
895 	struct tulip_private *tp = netdev_priv(dev);
896 
897 	wolinfo->supported = tp->wolinfo.supported;
898 	wolinfo->wolopts = tp->wolinfo.wolopts;
899 	return;
900 }
901 
902 
903 static const struct ethtool_ops ops = {
904 	.get_drvinfo = tulip_get_drvinfo,
905 	.set_wol     = tulip_ethtool_set_wol,
906 	.get_wol     = tulip_ethtool_get_wol,
907 };
908 
909 /* Provide ioctl() calls to examine the MII xcvr state. */
910 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
911 {
912 	struct tulip_private *tp = netdev_priv(dev);
913 	void __iomem *ioaddr = tp->base_addr;
914 	struct mii_ioctl_data *data = if_mii(rq);
915 	const unsigned int phy_idx = 0;
916 	int phy = tp->phys[phy_idx] & 0x1f;
917 	unsigned int regnum = data->reg_num;
918 
919 	switch (cmd) {
920 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
921 		if (tp->mii_cnt)
922 			data->phy_id = phy;
923 		else if (tp->flags & HAS_NWAY)
924 			data->phy_id = 32;
925 		else if (tp->chip_id == COMET)
926 			data->phy_id = 1;
927 		else
928 			return -ENODEV;
929 
930 	case SIOCGMIIREG:		/* Read MII PHY register. */
931 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
932 			int csr12 = ioread32 (ioaddr + CSR12);
933 			int csr14 = ioread32 (ioaddr + CSR14);
934 			switch (regnum) {
935 			case 0:
936                                 if (((csr14<<5) & 0x1000) ||
937                                         (dev->if_port == 5 && tp->nwayset))
938                                         data->val_out = 0x1000;
939                                 else
940                                         data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
941                                                 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
942 				break;
943 			case 1:
944                                 data->val_out =
945 					0x1848 +
946 					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
947 					((csr12&0x06) == 6 ? 0 : 4);
948                                 data->val_out |= 0x6048;
949 				break;
950 			case 4:
951                                 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
952                                 data->val_out =
953 					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
954 					((csr14 >> 1) & 0x20) + 1;
955                                 data->val_out |= ((csr14 >> 9) & 0x03C0);
956 				break;
957 			case 5: data->val_out = tp->lpar; break;
958 			default: data->val_out = 0; break;
959 			}
960 		} else {
961 			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
962 		}
963 		return 0;
964 
965 	case SIOCSMIIREG:		/* Write MII PHY register. */
966 		if (regnum & ~0x1f)
967 			return -EINVAL;
968 		if (data->phy_id == phy) {
969 			u16 value = data->val_in;
970 			switch (regnum) {
971 			case 0:	/* Check for autonegotiation on or reset. */
972 				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
973 				if (tp->full_duplex_lock)
974 					tp->full_duplex = (value & 0x0100) ? 1 : 0;
975 				break;
976 			case 4:
977 				tp->advertising[phy_idx] =
978 				tp->mii_advertise = data->val_in;
979 				break;
980 			}
981 		}
982 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
983 			u16 value = data->val_in;
984 			if (regnum == 0) {
985 			  if ((value & 0x1200) == 0x1200) {
986 			    if (tp->chip_id == PNIC2) {
987                                    pnic2_start_nway (dev);
988                             } else {
989 				   t21142_start_nway (dev);
990                             }
991 			  }
992 			} else if (regnum == 4)
993 				tp->sym_advertise = value;
994 		} else {
995 			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
996 		}
997 		return 0;
998 	default:
999 		return -EOPNOTSUPP;
1000 	}
1001 
1002 	return -EOPNOTSUPP;
1003 }
1004 
1005 
1006 /* Set or clear the multicast filter for this adaptor.
1007    Note that we only use exclusion around actually queueing the
1008    new frame, not around filling tp->setup_frame.  This is non-deterministic
1009    when re-entered but still correct. */
1010 
1011 #undef set_bit_le
1012 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
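/* Example: an address whose little-endian CRC ends in 0x1ff sets bit 7 of
 * hash_table byte 63, i.e. the last bit of the 512-bit hash built below;
 * the broadcast entry set_bit_le(255, ...) lands in byte 31, bit 7. */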
1013 
1014 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1015 {
1016 	struct tulip_private *tp = netdev_priv(dev);
1017 	u16 hash_table[32];
1018 	struct netdev_hw_addr *ha;
1019 	int i;
1020 	u16 *eaddrs;
1021 
1022 	memset(hash_table, 0, sizeof(hash_table));
1023 	set_bit_le(255, hash_table); 			/* Broadcast entry */
1024 	/* This should work on big-endian machines as well. */
1025 	netdev_for_each_mc_addr(ha, dev) {
1026 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1027 
1028 		set_bit_le(index, hash_table);
1029 	}
1030 	for (i = 0; i < 32; i++) {
1031 		*setup_frm++ = hash_table[i];
1032 		*setup_frm++ = hash_table[i];
1033 	}
1034 	setup_frm = &tp->setup_frame[13*6];
1035 
1036 	/* Fill the final entry with our physical address. */
1037 	eaddrs = (u16 *)dev->dev_addr;
1038 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1039 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1040 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1041 }
1042 
1043 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1044 {
1045 	struct tulip_private *tp = netdev_priv(dev);
1046 	struct netdev_hw_addr *ha;
1047 	u16 *eaddrs;
1048 
1049 	/* We have <= 14 addresses so we can use the wonderful
1050 	   16 address perfect filtering of the Tulip. */
1051 	netdev_for_each_mc_addr(ha, dev) {
1052 		eaddrs = (u16 *) ha->addr;
1053 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1054 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1055 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1056 	}
1057 	/* Fill the unused entries with the broadcast address. */
1058 	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1059 	setup_frm = &tp->setup_frame[15*6];
1060 
1061 	/* Fill the final entry with our physical address. */
1062 	eaddrs = (u16 *)dev->dev_addr;
1063 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1064 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1065 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1066 }
1067 
1068 
1069 static void set_rx_mode(struct net_device *dev)
1070 {
1071 	struct tulip_private *tp = netdev_priv(dev);
1072 	void __iomem *ioaddr = tp->base_addr;
1073 	int csr6;
1074 
1075 	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1076 
1077 	tp->csr6 &= ~0x00D5;
1078 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1079 		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1080 		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1081 	} else if ((netdev_mc_count(dev) > 1000) ||
1082 		   (dev->flags & IFF_ALLMULTI)) {
1083 		/* Too many to filter well -- accept all multicasts. */
1084 		tp->csr6 |= AcceptAllMulticast;
1085 		csr6 |= AcceptAllMulticast;
1086 	} else	if (tp->flags & MC_HASH_ONLY) {
1087 		/* Some work-alikes have only a 64-entry hash filter table. */
1088 		/* Should verify correctness on big-endian/__powerpc__ */
1089 		struct netdev_hw_addr *ha;
1090 		if (netdev_mc_count(dev) > 64) {
1091 			/* Arbitrary non-effective limit. */
1092 			tp->csr6 |= AcceptAllMulticast;
1093 			csr6 |= AcceptAllMulticast;
1094 		} else {
1095 			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1096 			int filterbit;
1097 			netdev_for_each_mc_addr(ha, dev) {
1098 				if (tp->flags & COMET_MAC_ADDR)
1099 					filterbit = ether_crc_le(ETH_ALEN,
1100 								 ha->addr);
1101 				else
1102 					filterbit = ether_crc(ETH_ALEN,
1103 							      ha->addr) >> 26;
1104 				filterbit &= 0x3f;
1105 				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1106 				if (tulip_debug > 2)
1107 					dev_info(&dev->dev,
1108 						 "Added filter for %pM  %08x bit %d\n",
1109 						 ha->addr,
1110 						 ether_crc(ETH_ALEN, ha->addr),
1111 						 filterbit);
1112 			}
1113 			if (mc_filter[0] == tp->mc_filter[0]  &&
1114 				mc_filter[1] == tp->mc_filter[1])
1115 				;				/* No change. */
1116 			else if (tp->flags & IS_ASIX) {
1117 				iowrite32(2, ioaddr + CSR13);
1118 				iowrite32(mc_filter[0], ioaddr + CSR14);
1119 				iowrite32(3, ioaddr + CSR13);
1120 				iowrite32(mc_filter[1], ioaddr + CSR14);
1121 			} else if (tp->flags & COMET_MAC_ADDR) {
1122 				iowrite32(mc_filter[0], ioaddr + CSR27);
1123 				iowrite32(mc_filter[1], ioaddr + CSR28);
1124 			}
1125 			tp->mc_filter[0] = mc_filter[0];
1126 			tp->mc_filter[1] = mc_filter[1];
1127 		}
1128 	} else {
1129 		unsigned long flags;
1130 		u32 tx_flags = 0x08000000 | 192;
1131 
1132 		/* Note that only the low-address shortword of setup_frame is valid!
1133 		   The values are doubled for big-endian architectures. */
1134 		if (netdev_mc_count(dev) > 14) {
1135 			/* Must use a multicast hash table. */
1136 			build_setup_frame_hash(tp->setup_frame, dev);
1137 			tx_flags = 0x08400000 | 192;
1138 		} else {
1139 			build_setup_frame_perfect(tp->setup_frame, dev);
1140 		}
1141 
1142 		spin_lock_irqsave(&tp->lock, flags);
1143 
1144 		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1145 			/* Same setup recently queued, we need not add it. */
1146 		} else {
1147 			unsigned int entry;
1148 			int dummy = -1;
1149 
1150 			/* Now add this frame to the Tx list. */
1151 
1152 			entry = tp->cur_tx++ % TX_RING_SIZE;
1153 
1154 			if (entry != 0) {
1155 				/* Avoid a chip erratum by prefixing a dummy entry. */
1156 				tp->tx_buffers[entry].skb = NULL;
1157 				tp->tx_buffers[entry].mapping = 0;
1158 				tp->tx_ring[entry].length =
1159 					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1160 				tp->tx_ring[entry].buffer1 = 0;
1161 				/* Must set DescOwned later to avoid race with chip */
1162 				dummy = entry;
1163 				entry = tp->cur_tx++ % TX_RING_SIZE;
1164 
1165 			}
1166 
1167 			tp->tx_buffers[entry].skb = NULL;
1168 			tp->tx_buffers[entry].mapping =
1169 				pci_map_single(tp->pdev, tp->setup_frame,
1170 					       sizeof(tp->setup_frame),
1171 					       PCI_DMA_TODEVICE);
1172 			/* Put the setup frame on the Tx list. */
1173 			if (entry == TX_RING_SIZE-1)
1174 				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1175 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1176 			tp->tx_ring[entry].buffer1 =
1177 				cpu_to_le32(tp->tx_buffers[entry].mapping);
1178 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1179 			if (dummy >= 0)
1180 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1181 			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1182 				netif_stop_queue(dev);
1183 
1184 			/* Trigger an immediate transmit demand. */
1185 			iowrite32(0, ioaddr + CSR1);
1186 		}
1187 
1188 		spin_unlock_irqrestore(&tp->lock, flags);
1189 	}
1190 
1191 	iowrite32(csr6, ioaddr + CSR6);
1192 }
1193 
1194 #ifdef CONFIG_TULIP_MWI
1195 static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1196 					struct net_device *dev)
1197 {
1198 	struct tulip_private *tp = netdev_priv(dev);
1199 	u8 cache;
1200 	u16 pci_command;
1201 	u32 csr0;
1202 
1203 	if (tulip_debug > 3)
1204 		netdev_dbg(dev, "tulip_mwi_config()\n");
1205 
1206 	tp->csr0 = csr0 = 0;
1207 
1208 	/* if we have any cache line size at all, we can do MRM and MWI */
1209 	csr0 |= MRM | MWI;
1210 
1211 	/* Enable MWI in the standard PCI command bit.
1212 	 * Check for the case where MWI is desired but not available
1213 	 */
1214 	pci_try_set_mwi(pdev);
1215 
1216 	/* read result from hardware (in case bit refused to enable) */
1217 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1218 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1219 		csr0 &= ~MWI;
1220 
1221 	/* if cache line size hardwired to zero, no MWI */
1222 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1223 	if ((csr0 & MWI) && (cache == 0)) {
1224 		csr0 &= ~MWI;
1225 		pci_clear_mwi(pdev);
1226 	}
1227 
1228 	/* assign per-cacheline-size cache alignment and
1229 	 * burst length values
1230 	 */
1231 	switch (cache) {
1232 	case 8:
1233 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1234 		break;
1235 	case 16:
1236 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1237 		break;
1238 	case 32:
1239 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1240 		break;
1241 	default:
1242 		cache = 0;
1243 		break;
1244 	}
1245 
1246 	/* if we have a good cache line size, we by now have a good
1247 	 * csr0, so save it and exit
1248 	 */
1249 	if (cache)
1250 		goto out;
1251 
1252 	/* we don't have a good csr0 or cache line size, disable MWI */
1253 	if (csr0 & MWI) {
1254 		pci_clear_mwi(pdev);
1255 		csr0 &= ~MWI;
1256 	}
1257 
1258 	/* sane defaults for burst length and cache alignment
1259 	 * originally from de4x5 driver
1260 	 */
1261 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1262 
1263 out:
1264 	tp->csr0 = csr0;
1265 	if (tulip_debug > 2)
1266 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1267 			   cache, csr0);
1268 }
1269 #endif
1270 
1271 /*
1272  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1273  *	is the DM910X and the on chip ULi devices
1274  */
1275 
1276 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1277 {
1278 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1279 		return 1;
1280 	return 0;
1281 }
1282 
1283 static const struct net_device_ops tulip_netdev_ops = {
1284 	.ndo_open		= tulip_open,
1285 	.ndo_start_xmit		= tulip_start_xmit,
1286 	.ndo_tx_timeout		= tulip_tx_timeout,
1287 	.ndo_stop		= tulip_close,
1288 	.ndo_get_stats		= tulip_get_stats,
1289 	.ndo_do_ioctl 		= private_ioctl,
1290 	.ndo_set_rx_mode	= set_rx_mode,
1291 	.ndo_change_mtu		= eth_change_mtu,
1292 	.ndo_set_mac_address	= eth_mac_addr,
1293 	.ndo_validate_addr	= eth_validate_addr,
1294 #ifdef CONFIG_NET_POLL_CONTROLLER
1295 	.ndo_poll_controller	 = poll_tulip,
1296 #endif
1297 };
1298 
1299 DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1300 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1301 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1302 	{ },
1303 };
1304 
1305 static int __devinit tulip_init_one (struct pci_dev *pdev,
1306 				     const struct pci_device_id *ent)
1307 {
1308 	struct tulip_private *tp;
1309 	/* See note below on the multiport cards. */
1310 	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1311 	static int last_irq;
1312 	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
1313 	int i, irq;
1314 	unsigned short sum;
1315 	unsigned char *ee_data;
1316 	struct net_device *dev;
1317 	void __iomem *ioaddr;
1318 	static int board_idx = -1;
1319 	int chip_idx = ent->driver_data;
1320 	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1321 	unsigned int eeprom_missing = 0;
1322 	unsigned int force_csr0 = 0;
1323 
1324 #ifndef MODULE
1325 	if (tulip_debug > 0)
1326 		printk_once(KERN_INFO "%s", version);
1327 #endif
1328 
1329 	board_idx++;
1330 
1331 	/*
1332 	 *	LanMedia boards wire a Tulip chip to a WAN interface and need a
1333 	 *	very different driver (the lmc driver).
1334 	 */
1335 
1336         if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1337 		pr_err("skipping LMC card\n");
1338 		return -ENODEV;
1339 	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1340 		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1341 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1342 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1343 		pr_err("skipping SBE T3E3 port\n");
1344 		return -ENODEV;
1345 	}
1346 
1347 	/*
1348 	 *	DM910x chips should be handled by the dmfe driver, except
1349 	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1350 	 *	software CRC which only the dmfe driver supports.
1351 	 */
1352 
1353 #ifdef CONFIG_TULIP_DM910X
1354 	if (chip_idx == DM910X) {
1355 		struct device_node *dp;
1356 
1357 		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1358 		    pdev->revision < 0x30) {
1359 			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1360 			return -ENODEV;
1361 		}
1362 
1363 		dp = pci_device_to_OF_node(pdev);
1364 		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1365 			pr_info("skipping DM910x expansion card (use dmfe)\n");
1366 			return -ENODEV;
1367 		}
1368 	}
1369 #endif
1370 
1371 	/*
1372 	 *	Look for early PCI chipsets on which people report hangs
1373 	 *	unless the workarounds are enabled.
1374 	 */
1375 
1376 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1377 	      aligned.  Aries might need this too. The Saturn errata are not
1378 	      pretty reading but thankfully it's an old 486 chipset.
1379 
1380 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1381 	      Saturn.
1382 	*/
1383 
1384 	if (pci_dev_present(early_486_chipsets)) {
1385 		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1386 		force_csr0 = 1;
1387 	}
1388 
1389 	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1390 	if (chip_idx == AX88140) {
1391 		if ((csr0 & 0x3f00) == 0)
1392 			csr0 |= 0x2000;
1393 	}
1394 
1395 	/* PNIC doesn't have MWI/MRL/MRM... */
1396 	if (chip_idx == LC82C168)
1397 		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1398 
1399 	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1400 	if (tulip_uli_dm_quirk(pdev)) {
1401 		csr0 &= ~0x01f100ff;
1402 #if defined(CONFIG_SPARC)
1403                 csr0 = (csr0 & ~0xff00) | 0xe000;
1404 #endif
1405 	}
1406 	/*
1407 	 *	And back to business
1408 	 */
1409 
1410 	i = pci_enable_device(pdev);
1411 	if (i) {
1412 		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1413 		return i;
1414 	}
1415 
1416 	/* The chip will fail to enter a low-power state later unless
1417 	 * first explicitly commanded into D0 */
1418 	if (pci_set_power_state(pdev, PCI_D0)) {
1419 		pr_notice("Failed to set power state to D0\n");
1420 	}
1421 
1422 	irq = pdev->irq;
1423 
1424 	/* alloc_etherdev ensures aligned and zeroed private structures */
1425 	dev = alloc_etherdev (sizeof (*tp));
1426 	if (!dev)
1427 		return -ENOMEM;
1428 
1429 	SET_NETDEV_DEV(dev, &pdev->dev);
1430 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1431 		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1432 		       pci_name(pdev),
1433 		       (unsigned long long)pci_resource_len (pdev, 0),
1434 		       (unsigned long long)pci_resource_start (pdev, 0));
1435 		goto err_out_free_netdev;
1436 	}
1437 
1438 	/* grab all resources from both PIO and MMIO regions, as we
1439 	 * don't want anyone else messing around with our hardware */
1440 	if (pci_request_regions (pdev, DRV_NAME))
1441 		goto err_out_free_netdev;
1442 
1443 	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1444 
1445 	if (!ioaddr)
1446 		goto err_out_free_res;
1447 
1448 	/*
1449 	 * initialize private data structure 'tp'
1450 	 * it is zeroed and aligned in alloc_etherdev
1451 	 */
1452 	tp = netdev_priv(dev);
1453 	tp->dev = dev;
1454 
1455 	tp->rx_ring = pci_alloc_consistent(pdev,
1456 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1457 					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1458 					   &tp->rx_ring_dma);
1459 	if (!tp->rx_ring)
1460 		goto err_out_mtable;
1461 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1462 	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1463 
1464 	tp->chip_id = chip_idx;
1465 	tp->flags = tulip_tbl[chip_idx].flags;
1466 
1467 	tp->wolinfo.supported = 0;
1468 	tp->wolinfo.wolopts = 0;
1469 	/* COMET: Enable power management only for AN983B */
1470 	if (chip_idx == COMET ) {
1471 		u32 sig;
1472 		pci_read_config_dword (pdev, 0x80, &sig);
1473 		if (sig == 0x09811317) {
1474 			tp->flags |= COMET_PM;
1475 			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1476 			pr_info("%s: Enabled WOL support for AN983B\n",
1477 				__func__);
1478 		}
1479 	}
1480 	tp->pdev = pdev;
1481 	tp->base_addr = ioaddr;
1482 	tp->revision = pdev->revision;
1483 	tp->csr0 = csr0;
1484 	spin_lock_init(&tp->lock);
1485 	spin_lock_init(&tp->mii_lock);
1486 	init_timer(&tp->timer);
1487 	tp->timer.data = (unsigned long)dev;
1488 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1489 
1490 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1491 
1492 	dev->base_addr = (unsigned long)ioaddr;
1493 
1494 #ifdef CONFIG_TULIP_MWI
1495 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1496 		tulip_mwi_config (pdev, dev);
1497 #endif
1498 
1499 	/* Stop the chip's Tx and Rx processes. */
1500 	tulip_stop_rxtx(tp);
1501 
1502 	pci_set_master(pdev);
1503 
1504 #ifdef CONFIG_GSC
1505 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1506 		switch (pdev->subsystem_device) {
1507 		default:
1508 			break;
1509 		case 0x1061:
1510 		case 0x1062:
1511 		case 0x1063:
1512 		case 0x1098:
1513 		case 0x1099:
1514 		case 0x10EE:
1515 			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1516 			chip_name = "GSC DS21140 Tulip";
1517 		}
1518 	}
1519 #endif
1520 
1521 	/* Clear the missed-packet counter. */
1522 	ioread32(ioaddr + CSR8);
1523 
1524 	/* The station address ROM is read byte serially.  The register must
1525 	   be polled, waiting for the value to be read bit serially from the
1526 	   EEPROM.
1527 	   */
1528 	ee_data = tp->eeprom;
1529 	memset(ee_data, 0, sizeof(tp->eeprom));
1530 	sum = 0;
1531 	if (chip_idx == LC82C168) {
1532 		for (i = 0; i < 3; i++) {
1533 			int value, boguscnt = 100000;
1534 			iowrite32(0x600 | i, ioaddr + 0x98);
1535 			do {
1536 				value = ioread32(ioaddr + CSR9);
1537 			} while (value < 0  && --boguscnt > 0);
1538 			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1539 			sum += value & 0xffff;
1540 		}
1541 	} else if (chip_idx == COMET) {
1542 		/* No need to read the EEPROM. */
1543 		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1544 		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1545 		for (i = 0; i < 6; i ++)
1546 			sum += dev->dev_addr[i];
1547 	} else {
1548 		/* A serial EEPROM interface, we read now and sort it out later. */
1549 		int sa_offset = 0;
1550 		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1551 		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1552 
1553 		if (ee_max_addr > sizeof(tp->eeprom))
1554 			ee_max_addr = sizeof(tp->eeprom);
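		/* For example: a 6-bit-address EEPROM gives ee_max_addr =
		 * ((1 << 6) - 1) * 2 = 126 bytes, an 8-bit part 510 bytes;
		 * both are clamped to sizeof(tp->eeprom) above. */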
1555 
1556 		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1557 			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1558 			ee_data[i] = data & 0xff;
1559 			ee_data[i + 1] = data >> 8;
1560 		}
1561 
1562 		/* DEC now has a specification (see Notes) but early board makers
1563 		   just put the address in the first EEPROM locations. */
1564 		/* This does  memcmp(ee_data, ee_data+16, 8) */
1565 		for (i = 0; i < 8; i ++)
1566 			if (ee_data[i] != ee_data[16+i])
1567 				sa_offset = 20;
1568 		if (chip_idx == CONEXANT) {
1569 			/* Check that the tuple type and length is correct. */
1570 			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1571 				sa_offset = 0x19A;
1572 		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1573 				   ee_data[2] == 0) {
1574 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1575 			multiport_cnt = 4;
1576 		}
1577 #ifdef CONFIG_MIPS_COBALT
1578                if ((pdev->bus->number == 0) &&
1579                    ((PCI_SLOT(pdev->devfn) == 7) ||
1580                     (PCI_SLOT(pdev->devfn) == 12))) {
1581                        /* Cobalt MAC address in first EEPROM locations. */
1582                        sa_offset = 0;
1583 		       /* Ensure our media table fixup gets applied */
1584 		       memcpy(ee_data + 16, ee_data, 8);
1585                }
1586 #endif
1587 #ifdef CONFIG_GSC
1588 		/* Check to see if we have a broken srom */
1589 		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1590 			/* pci_vendor_id and subsystem_id are swapped */
1591 			ee_data[0] = ee_data[2];
1592 			ee_data[1] = ee_data[3];
1593 			ee_data[2] = 0x61;
1594 			ee_data[3] = 0x10;
1595 
1596 			/* HSC-PCI boards need to be byte-swapped and shifted
1597 			 * up 1 word.  This shift needs to happen at the end
1598 			 * of the MAC first because of the 2 byte overlap.
1599 			 */
1600 			for (i = 4; i >= 0; i -= 2) {
1601 				ee_data[17 + i + 3] = ee_data[17 + i];
1602 				ee_data[16 + i + 5] = ee_data[16 + i];
1603 			}
1604 		}
1605 #endif
1606 
1607 		for (i = 0; i < 6; i ++) {
1608 			dev->dev_addr[i] = ee_data[i + sa_offset];
1609 			sum += ee_data[i + sa_offset];
1610 		}
1611 	}
1612 	/* Lite-On boards have the address byte-swapped. */
1613 	if ((dev->dev_addr[0] == 0xA0 ||
1614 	     dev->dev_addr[0] == 0xC0 ||
1615 	     dev->dev_addr[0] == 0x02) &&
1616 	    dev->dev_addr[1] == 0x00)
1617 		for (i = 0; i < 6; i+=2) {
1618 			char tmp = dev->dev_addr[i];
1619 			dev->dev_addr[i] = dev->dev_addr[i+1];
1620 			dev->dev_addr[i+1] = tmp;
1621 		}
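	/* A byte-swapped Lite-On style prefix such as 00:A0:xx, 00:C0:xx or
	 * 00:02:xx appears here as A0:00, C0:00 or 02:00, which is what the
	 * test above catches before swapping each 16-bit pair back. */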
1622 	/* On the Zynx 315 Etherarray and other multiport boards only the
1623 	   first Tulip has an EEPROM.
1624 	   On Sparc systems the mac address is held in the OBP property
1625 	   "local-mac-address".
1626 	   The addresses of the subsequent ports are derived from the first.
1627 	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1628 	   that here as well. */
1629 	if (sum == 0  || sum == 6*0xff) {
1630 #if defined(CONFIG_SPARC)
1631 		struct device_node *dp = pci_device_to_OF_node(pdev);
1632 		const unsigned char *addr;
1633 		int len;
1634 #endif
1635 		eeprom_missing = 1;
1636 		for (i = 0; i < 5; i++)
1637 			dev->dev_addr[i] = last_phys_addr[i];
1638 		dev->dev_addr[i] = last_phys_addr[i] + 1;
1639 #if defined(CONFIG_SPARC)
1640 		addr = of_get_property(dp, "local-mac-address", &len);
1641 		if (addr && len == 6)
1642 			memcpy(dev->dev_addr, addr, 6);
1643 #endif
1644 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1645 		if (last_irq)
1646 			irq = last_irq;
1647 #endif
1648 	}
1649 
1650 	for (i = 0; i < 6; i++)
1651 		last_phys_addr[i] = dev->dev_addr[i];
1652 	last_irq = irq;
1653 	dev->irq = irq;
1654 
1655 	/* The lower four bits are the media type. */
1656 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1657 		if (options[board_idx] & MEDIA_MASK)
1658 			tp->default_port = options[board_idx] & MEDIA_MASK;
1659 		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1660 			tp->full_duplex = 1;
1661 		if (mtu[board_idx] > 0)
1662 			dev->mtu = mtu[board_idx];
1663 	}
1664 	if (dev->mem_start & MEDIA_MASK)
1665 		tp->default_port = dev->mem_start & MEDIA_MASK;
1666 	if (tp->default_port) {
1667 		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1668 			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1669 		tp->medialock = 1;
1670 		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1671 			tp->full_duplex = 1;
1672 	}
1673 	if (tp->full_duplex)
1674 		tp->full_duplex_lock = 1;
1675 
1676 	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1677 		static const u16 media2advert[] = {
1678 			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1679 		};
1680 		tp->mii_advertise = media2advert[tp->default_port - 9];
1681 		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1682 	}
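	/* media2advert[] maps the MII media types (default_port values 9..15)
	 * to IEEE 802.3 autonegotiation advertisement bits: 0x20/0x40 are
	 * 10baseT half/full duplex, 0x80/0x100 are 100baseTx half/full duplex,
	 * 0x200 is 100baseT4, and 0x03e0 advertises all of them for a generic
	 * MII selection. */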
1683 
1684 	if (tp->flags & HAS_MEDIA_TABLE) {
1685 		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1686 		tulip_parse_eeprom(dev);
1687 		strcpy(dev->name, "eth%d");			/* un-hack */
1688 	}
1689 
1690 	if ((tp->flags & ALWAYS_CHECK_MII) ||
1691 		(tp->mtable  &&  tp->mtable->has_mii) ||
1692 		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1693 		if (tp->mtable  &&  tp->mtable->has_mii) {
1694 			for (i = 0; i < tp->mtable->leafcount; i++)
1695 				if (tp->mtable->mleaf[i].media == 11) {
1696 					tp->cur_index = i;
1697 					tp->saved_if_port = dev->if_port;
1698 					tulip_select_media(dev, 2);
1699 					dev->if_port = tp->saved_if_port;
1700 					break;
1701 				}
1702 		}
1703 
1704 		/* Find the connected MII xcvrs.
1705 		   Doing this in open() would allow detecting external xcvrs
1706 		   later, but takes much time. */
1707 		tulip_find_mii (dev, board_idx);
1708 	}
1709 
1710 	/* The Tulip-specific entries in the device structure. */
1711 	dev->netdev_ops = &tulip_netdev_ops;
1712 	dev->watchdog_timeo = TX_TIMEOUT;
1713 #ifdef CONFIG_TULIP_NAPI
1714 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1715 #endif
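	/* In NAPI builds tulip_poll() runs with a budget weight of 16, i.e. at
	 * most 16 packets are processed per poll before yielding. */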
1716 	SET_ETHTOOL_OPS(dev, &ops);
1717 
1718 	if (register_netdev(dev))
1719 		goto err_out_free_ring;
1720 
1721 	pci_set_drvdata(pdev, dev);
1722 
1723 	dev_info(&dev->dev,
1724 #ifdef CONFIG_TULIP_MMIO
1725 		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1726 #else
1727 		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1728 #endif
1729 		 chip_name, pdev->revision,
1730 		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1731 		 eeprom_missing ? " EEPROM not present," : "",
1732 		 dev->dev_addr, irq);
1733 
1734 	if (tp->chip_id == PNIC2)
1735 		tp->link_change = pnic2_lnk_change;
1736 	else if (tp->flags & HAS_NWAY)
1737 		tp->link_change = t21142_lnk_change;
1738 	else if (tp->flags & HAS_PNICNWAY)
1739 		tp->link_change = pnic_lnk_change;
1740 
1741 	/* Reset the xcvr interface and turn on heartbeat. */
1742 	switch (chip_idx) {
1743 	case DC21140:
1744 	case DM910X:
1745 	default:
1746 		if (tp->mtable)
1747 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1748 		break;
1749 	case DC21142:
1750 		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1751 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1752 			iowrite32(0x0000, ioaddr + CSR13);
1753 			iowrite32(0x0000, ioaddr + CSR14);
1754 			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1755 		} else
1756 			t21142_start_nway(dev);
1757 		break;
1758 	case PNIC2:
1759 		/* just do a reset for sanity's sake */
1760 		iowrite32(0x0000, ioaddr + CSR13);
1761 		iowrite32(0x0000, ioaddr + CSR14);
1762 		break;
1763 	case LC82C168:
1764 		if ( ! tp->mii_cnt) {
1765 			tp->nway = 1;
1766 			tp->nwayset = 0;
1767 			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1768 			iowrite32(0x30, ioaddr + CSR12);
1769 			iowrite32(0x0001F078, ioaddr + CSR6);
1770 			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1771 		}
1772 		break;
1773 	case MX98713:
1774 	case COMPEX9881:
1775 		iowrite32(0x00000000, ioaddr + CSR6);
1776 		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1777 		iowrite32(0x00000001, ioaddr + CSR13);
1778 		break;
1779 	case MX98715:
1780 	case MX98725:
1781 		iowrite32(0x01a80000, ioaddr + CSR6);
1782 		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1783 		iowrite32(0x00001000, ioaddr + CSR12);
1784 		break;
1785 	case COMET:
1786 		/* No initialization necessary. */
1787 		break;
1788 	}
1789 
1790 	/* put the chip in snooze mode until opened */
1791 	tulip_set_power_state (tp, 0, 1);
1792 
1793 	return 0;
1794 
1795 err_out_free_ring:
1796 	pci_free_consistent (pdev,
1797 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1798 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1799 			     tp->rx_ring, tp->rx_ring_dma);
1800 
1801 err_out_mtable:
1802 	kfree (tp->mtable);
1803 	pci_iounmap(pdev, ioaddr);
1804 
1805 err_out_free_res:
1806 	pci_release_regions (pdev);
1807 
1808 err_out_free_netdev:
1809 	free_netdev (dev);
1810 	return -ENODEV;
1811 }
1812 
1813 
1814 /* set the registers according to the given wolopts */
1815 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1816 {
1817 	struct net_device *dev = pci_get_drvdata(pdev);
1818 	struct tulip_private *tp = netdev_priv(dev);
1819 	void __iomem *ioaddr = tp->base_addr;
1820 
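	/* Only Comet-family chips (flagged COMET_PM) expose the CSR18/CSR13
	 * power-management registers programmed below; on every other chip the
	 * requested wolopts are silently ignored. */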
1821 	if (tp->flags & COMET_PM) {
1822 
1823 		unsigned int tmp;
1824 
1825 		tmp = ioread32(ioaddr + CSR18);
1826 		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1827 		tmp |= comet_csr18_pm_mode;
1828 		iowrite32(tmp, ioaddr + CSR18);
1829 
1830 		/* Set the Wake-up Control/Status Register to the given WOL options */
1831 		tmp = ioread32(ioaddr + CSR13);
1832 		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1833 		if (wolopts & WAKE_MAGIC)
1834 			tmp |= comet_csr13_mpre;
1835 		if (wolopts & WAKE_PHY)
1836 			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1837 		/* Clear the event flags */
1838 		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1839 		iowrite32(tmp, ioaddr + CSR13);
1840 	}
1841 }
1842 
1843 #ifdef CONFIG_PM
1844 
1845 
1846 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1847 {
1848 	pci_power_t pstate;
1849 	struct net_device *dev = pci_get_drvdata(pdev);
1850 	struct tulip_private *tp = netdev_priv(dev);
1851 
1852 	if (!dev)
1853 		return -EINVAL;
1854 
1855 	if (!netif_running(dev))
1856 		goto save_state;
1857 
1858 	tulip_down(dev);
1859 
1860 	netif_device_detach(dev);
1861 	free_irq(dev->irq, dev);
1862 
1863 save_state:
1864 	pci_save_state(pdev);
1865 	pci_disable_device(pdev);
1866 	pstate = pci_choose_state(pdev, state);
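	/* pci_choose_state() maps the PM event to a target PCI D-state; the
	 * wake-up sources are armed only for a real suspend that leaves D0. */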
1867 	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1868 		int rc;
1869 
1870 		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1871 		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1872 		if (rc)
1873 			pr_err("pci_enable_wake failed (%d)\n", rc);
1874 	}
1875 	pci_set_power_state(pdev, pstate);
1876 
1877 	return 0;
1878 }
1879 
1880 
1881 static int tulip_resume(struct pci_dev *pdev)
1882 {
1883 	struct net_device *dev = pci_get_drvdata(pdev);
1884 	struct tulip_private *tp = netdev_priv(dev);
1885 	void __iomem *ioaddr = tp->base_addr;
1886 	int retval;
1887 	unsigned int tmp;
1888 
1889 	if (!dev)
1890 		return -EINVAL;
1891 
1892 	pci_set_power_state(pdev, PCI_D0);
1893 	pci_restore_state(pdev);
1894 
1895 	if (!netif_running(dev))
1896 		return 0;
1897 
1898 	if ((retval = pci_enable_device(pdev))) {
1899 		pr_err("pci_enable_device failed in resume\n");
1900 		return retval;
1901 	}
1902 
1903 	if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1904 		pr_err("request_irq failed in resume\n");
1905 		return retval;
1906 	}
1907 
1908 	if (tp->flags & COMET_PM) {
1909 		pci_enable_wake(pdev, PCI_D3hot, 0);
1910 		pci_enable_wake(pdev, PCI_D3cold, 0);
1911 
1912 		/* Clear the PMES flag */
1913 		tmp = ioread32(ioaddr + CSR20);
1914 		tmp |= comet_csr20_pmes;
1915 		iowrite32(tmp, ioaddr + CSR20);
1916 
1917 		/* Disable all wake-up events */
1918 		tulip_set_wolopts(pdev, 0);
1919 	}
1920 	netif_device_attach(dev);
1921 
1922 	if (netif_running(dev))
1923 		tulip_up(dev);
1924 
1925 	return 0;
1926 }
1927 
1928 #endif /* CONFIG_PM */
1929 
1930 
1931 static void __devexit tulip_remove_one (struct pci_dev *pdev)
1932 {
1933 	struct net_device *dev = pci_get_drvdata (pdev);
1934 	struct tulip_private *tp;
1935 
1936 	if (!dev)
1937 		return;
1938 
1939 	tp = netdev_priv(dev);
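	/* Tear down in roughly the reverse order of probing: unregister the
	 * netdev, free the descriptor rings and the parsed media table, unmap
	 * the registers, then release the netdev and the PCI resources. */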
1940 	unregister_netdev(dev);
1941 	pci_free_consistent (pdev,
1942 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1943 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1944 			     tp->rx_ring, tp->rx_ring_dma);
1945 	kfree (tp->mtable);
1946 	pci_iounmap(pdev, tp->base_addr);
1947 	free_netdev (dev);
1948 	pci_release_regions (pdev);
1949 	pci_set_drvdata (pdev, NULL);
1950 
1951 	/* pci_power_off (pdev, -1); */
1952 }
1953 
1954 #ifdef CONFIG_NET_POLL_CONTROLLER
1955 /*
1956  * Polling 'interrupt' - used by things like netconsole to send skbs
1957  * without having to re-enable interrupts. It's not called while
1958  * the interrupt routine is executing.
1959  */
1960 
1961 static void poll_tulip (struct net_device *dev)
1962 {
1963 	/* disable_irq here is not very nice, but with the lockless
1964 	   interrupt handler we have no other choice. */
1965 	disable_irq(dev->irq);
1966 	tulip_interrupt (dev->irq, dev);
1967 	enable_irq(dev->irq);
1968 }
1969 #endif
1970 
1971 static struct pci_driver tulip_driver = {
1972 	.name		= DRV_NAME,
1973 	.id_table	= tulip_pci_tbl,
1974 	.probe		= tulip_init_one,
1975 	.remove		= __devexit_p(tulip_remove_one),
1976 #ifdef CONFIG_PM
1977 	.suspend	= tulip_suspend,
1978 	.resume		= tulip_resume,
1979 #endif /* CONFIG_PM */
1980 };
1981 
1982 
1983 static int __init tulip_init (void)
1984 {
1985 #ifdef MODULE
1986 	pr_info("%s", version);
1987 #endif
1988 
1989 	/* copy module parms into globals */
1990 	tulip_rx_copybreak = rx_copybreak;
1991 	tulip_max_interrupt_work = max_interrupt_work;
1992 
1993 	/* probe for and init boards */
1994 	return pci_register_driver(&tulip_driver);
1995 }
1996 
1997 
1998 static void __exit tulip_cleanup (void)
1999 {
2000 	pci_unregister_driver (&tulip_driver);
2001 }
2002 
2003 
2004 module_init(tulip_init);
2005 module_exit(tulip_cleanup);
2006