1 /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2 /*
3 	Written/copyright 1993-1998 by Donald Becker.
4 
5 	Copyright 1993 United States Government as represented by the
6 	Director, National Security Agency.
7 	This software may be used and distributed according to the terms
8 	of the GNU General Public License, incorporated herein by reference.
9 
10 	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12 
13 	The author may be reached as becker@scyld.com, or C/O
14 	Scyld Computing Corporation
15 	410 Severn Ave., Suite 210
16 	Annapolis MD 21403
17 
18 	Andrey V. Savochkin:
19 	- alignment problem with 1.3.* kernel and some minor changes.
20 	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21 	- added support for Linux/Alpha, but removed most of it, because
22         it worked only for the PCI chip.
23       - added hook for the 32bit lance driver
24       - added PCnetPCI II (79C970A) to chip table
25 	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26 	- hopefully fix above so Linux/Alpha can use ISA cards too.
27     8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28     v1.12 10/27/97 Module support -djb
29     v1.14  2/3/98 Module support modified, made PCI support optional -djb
30     v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31                   before unregister_netdev() which caused NULL pointer
32                   reference later in the chain (in rtnetlink_fill_ifinfo())
33                   -- Mika Kuoppala <miku@iki.fi>
34 
35     Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36     the 2.1 version of the old driver - Alan Cox
37 
38     Get rid of check_region, check kmalloc return in lance_probe1
39     Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
40 */
41 
42 static const char version[] = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
43 
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/sched.h>
47 #include <linux/string.h>
48 #include <linux/ptrace.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/slab.h>
52 #include <linux/interrupt.h>
53 #include <linux/pci.h>
54 #include <linux/init.h>
55 #include <asm/bitops.h>
56 #include <asm/io.h>
57 #include <asm/dma.h>
58 
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/skbuff.h>
62 
63 static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
64 int lance_probe(struct net_device *dev);
65 static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
66 
67 #ifdef LANCE_DEBUG
68 static int lance_debug = LANCE_DEBUG;
69 #else
70 static int lance_debug = 1;
71 #endif
72 
73 /*
74 				Theory of Operation
75 
76 I. Board Compatibility
77 
78 This device driver is designed for the AMD 79C960, the "PCnet-ISA
79 single-chip ethernet controller for ISA".  This chip is used in a wide
80 variety of boards from vendors such as Allied Telesis, HP, Kingston,
81 and Boca.  This driver is also intended to work with older AMD 7990
82 designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
83 I use the name LANCE to refer to all of the AMD chips, even though it properly
84 refers only to the original 7990.
85 
86 II. Board-specific settings
87 
The driver is designed to work with boards that use the faster
89 bus-master mode, rather than in shared memory mode.	 (Only older designs
90 have on-board buffer memory needed to support the slower shared memory mode.)
91 
92 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
93 channel.  This driver probes the likely base addresses:
94 {0x300, 0x320, 0x340, 0x360}.
95 After the board is found it generates a DMA-timeout interrupt and uses
96 autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
97 of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
98 probed for by enabling each free DMA channel in turn and checking if
99 initialization succeeds.
100 
101 The HP-J2405A board is an exception: with this board it is easy to read the
102 EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
103 _know_ the base address -- that field is for writing the EEPROM.)
104 
105 III. Driver operation
106 
107 IIIa. Ring buffers
108 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
109 the base and length of the data buffer, along with status bits.	 The length
110 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
111 the buffer length (rather than being directly the buffer length) for
112 implementation ease.  The current values are 2 (Tx) and 4 (Rx), which leads to
113 ring sizes of 4 (Tx) and 16 (Rx).  Increasing the number of ring entries
114 needlessly uses extra space and reduces the chance that an upper layer will
115 be able to reorder queued Tx packets based on priority.	 Decreasing the number
116 of entries makes it more difficult to achieve back-to-back packet transmission
117 and increases the chance that Rx ring will overflow.  (Consider the worst case
118 of receiving back-to-back minimum-sized packets.)
119 
120 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
121 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
122 avoid the administrative overhead. For the Rx side this avoids dynamically
123 allocating full-sized buffers "just in case", at the expense of a
124 memory-to-memory data copy for each packet received.  For most systems this
125 is a good tradeoff: the Rx buffer will always be in low memory, the copy
126 is inexpensive, and it primes the cache for later packet processing.  For Tx
127 the buffers are only used when needed as low-memory bounce buffers.
128 
129 IIIB. 16M memory limitations.
130 For the ISA bus master mode all structures used directly by the LANCE,
131 the initialization block, Rx and Tx rings, and data buffers, must be
132 accessible from the ISA bus, i.e. in the lower 16M of real memory.
133 This is a problem for current Linux kernels on >16M machines. The network
134 devices are initialized after memory initialization, and the kernel doles out
135 memory from the top of memory downward.	 The current solution is to have a
136 special network initialization routine that's called before memory
137 initialization; this will eventually be generalized for all network devices.
138 As mentioned before, low-memory "bounce-buffers" are used when needed.
139 
140 IIIC. Synchronization
141 The driver runs as two independent, single-threaded flows of control.  One
142 is the send-packet routine, which enforces single-threaded use by the
143 dev->tbusy flag.  The other thread is the interrupt handler, which is single
144 threaded by the hardware and other software.
145 
146 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
147 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
148 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
149 the 'lp->tx_full' flag.
150 
151 The interrupt handler has exclusive control over the Rx ring and records stats
152 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
153 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
154 stats.)	 After reaping the stats, it marks the queue entry as empty by setting
155 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
156 tx_full and tbusy flags.
157 
158 */
159 
160 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
161    Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
162    That translates to 4 and 4 (16 == 2^^4).
163    This is a compile-time option for efficiency.
164    */
165 #ifndef LANCE_LOG_TX_BUFFERS
166 #define LANCE_LOG_TX_BUFFERS 4
167 #define LANCE_LOG_RX_BUFFERS 4
168 #endif
169 
170 #define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
171 #define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
172 #define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)
173 
174 #define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
175 #define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
176 #define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
177 
178 #define PKT_BUF_SZ		1544
179 
180 /* Offsets from base I/O address. */
181 #define LANCE_DATA 0x10
182 #define LANCE_ADDR 0x12
183 #define LANCE_RESET 0x14
184 #define LANCE_BUS_IF 0x16
185 #define LANCE_TOTAL_SIZE 0x18
186 
187 #define TX_TIMEOUT	20
188 
189 /* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	s32 base;			/* Low 24 bits: buffer bus address; top byte: control,
					   incl. the 0x80000000 "owned by chip" bit. */
	s16 buf_length;			/* This length is 2s complement (negative)! */
	s16 msg_length;			/* This length is "normal". */
};
195 
struct lance_tx_head {
	s32 base;			/* Buffer bus address plus control bits; 0 marks the
					   slot as free (see lance_purge_ring). */
	s16 length;				/* Length is 2s complement (negative)! */
	s16 misc;			/* Extra Tx status from the chip -- presumably error
					   bits; confirm against the databook. */
};
201 
202 /* The LANCE initialization block, described in databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6]; /* Physical ethernet address */
	u32 filter[2];			/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits.
	   Low 24 bits: ring bus address; top 3 bits: log2 of the ring
	   length ({RX,TX}_RING_LEN_BITS). */
	u32  rx_ring;			/* Tx and Rx ring base pointers */
	u32  tx_ring;
};
211 
/* Per-device state.  Allocated in lance_probe1() from GFP_DMA memory so
   that the rings and init block below are reachable by the ISA bus
   master (low 16M), and 8-byte aligned as the chip requires. */
struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block	init_block;
	const char *name;		/* Chip name from chip_table. */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;			/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;			/* ISA DMA channel (4 = native bus-master, none needed). */
	struct net_device_stats stats;
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;		/* Serializes xmit path against the interrupt handler. */
};
232 
233 #define LANCE_MUST_PAD          0x00000001
234 #define LANCE_ENABLE_AUTOSELECT 0x00000002
235 #define LANCE_MUST_REINIT_RING  0x00000004
236 #define LANCE_MUST_UNRESET      0x00000008
237 #define LANCE_HAS_MISSED_FRAME  0x00000010
238 
239 /* A mapping from the chip ID number to the part number and features.
240    These are from the datasheets -- in real life the '970 version
241    reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
        {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCInetPCI II. */
                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
                        LANCE_HAS_MISSED_FRAME},
	/* Catch-all entry: id_number 0 terminates the search loop in
	   lance_probe1(), so any unrecognized ID lands here. */
	{0x0, 	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};
270 
271 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
272 
273 
274 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
275    Assume yes until we know the memory size. */
276 static unsigned char lance_need_isa_bounce_buffers = 1;
277 
278 static int lance_open(struct net_device *dev);
279 static int lance_open_fail(struct net_device *dev);
280 static void lance_init_ring(struct net_device *dev, int mode);
281 static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
282 static int lance_rx(struct net_device *dev);
283 static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
284 static int lance_close(struct net_device *dev);
285 static struct net_device_stats *lance_get_stats(struct net_device *dev);
286 static void set_multicast_list(struct net_device *dev);
287 static void lance_tx_timeout (struct net_device *dev);
288 
289 
290 
291 #ifdef MODULE
292 #define MAX_CARDS		8	/* Max number of interfaces (cards) per module */
293 
294 static struct net_device dev_lance[MAX_CARDS];
295 static int io[MAX_CARDS];
296 static int dma[MAX_CARDS];
297 static int irq[MAX_CARDS];
298 
299 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_CARDS) "i");
300 MODULE_PARM(dma, "1-" __MODULE_STRING(MAX_CARDS) "i");
301 MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_CARDS) "i");
302 MODULE_PARM(lance_debug, "i");
303 MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
304 MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
305 MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
306 MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
307 
init_module(void)308 int init_module(void)
309 {
310 	int this_dev, found = 0;
311 
312 	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
313 		struct net_device *dev = &dev_lance[this_dev];
314 		dev->irq = irq[this_dev];
315 		dev->base_addr = io[this_dev];
316 		dev->dma = dma[this_dev];
317 		dev->init = lance_probe;
318 		if (io[this_dev] == 0)  {
319 			if (this_dev != 0) break; /* only complain once */
320 			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
321 			return -EPERM;
322 		}
323 		if (register_netdev(dev) != 0) {
324 			printk(KERN_WARNING "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
325 			if (found != 0) return 0;	/* Got at least one. */
326 			return -ENXIO;
327 		}
328 		found++;
329 	}
330 
331 	return 0;
332 }
333 
cleanup_module(void)334 void cleanup_module(void)
335 {
336 	int this_dev;
337 
338 	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
339 		struct net_device *dev = &dev_lance[this_dev];
340 		if (dev->priv != NULL) {
341 			unregister_netdev(dev);
342 			free_dma(dev->dma);
343 			release_region(dev->base_addr, LANCE_TOTAL_SIZE);
344 			kfree(dev->priv);
345 			dev->priv = NULL;
346 		}
347 	}
348 }
349 #endif /* MODULE */
350 MODULE_LICENSE("GPL");
351 
352 
353 /* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
354    board probes now that kmalloc() can allocate ISA DMA-able regions.
355    This also allows the LANCE driver to be used as a module.
356    */
/* Scan the candidate ISA port list for a LANCE signature and hand any
   match to lance_probe1().  On success the claimed I/O region is kept
   (relabelled with the chip name) and 0 is returned; otherwise every
   region is released and -ENODEV is returned. */
int __init lance_probe(struct net_device *dev)
{
	int *port, result;

	/* Bounce buffers are only needed when RAM extends past the 16M
	   ISA DMA limit. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
			   signatures w/ minimal I/O reads */
			char offset15, offset14 = inb(ioaddr + 14);

			if ((offset14 == 0x52 || offset14 == 0x57) &&
				((offset15 = inb(ioaddr + 15)) == 0x57 ||
				 offset15 == 0x44)) {
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->priv;
					int ver = lp->chip_version;

					/* Success: keep the region, tagged with
					   the detected chip's name. */
					r->name = chip_table[ver].name;
					return 0;
				}
			}
			/* No signature, or probe1 failed: give the region back. */
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
391 
lance_probe1(struct net_device * dev,int ioaddr,int irq,int options)392 static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
393 {
394 	struct lance_private *lp;
395 	short dma_channels;					/* Mark spuriously-busy DMA channels */
396 	int i, reset_val, lance_version;
397 	const char *chipname;
398 	/* Flags for specific chips or boards. */
399 	unsigned char hpJ2405A = 0;		/* HP ISA adaptor */
400 	int hp_builtin = 0;			/* HP on-board ethernet. */
401 	static int did_version;			/* Already printed version info. */
402 	unsigned long flags;
403 
404 	/* First we look for special cases.
405 	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
406 	   There are two HP versions, check the BIOS for the configuration port.
407 	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
408 	   */
409 	if (isa_readw(0x000f0102) == 0x5048)  {
410 		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
411 		int hp_port = (isa_readl(0x000f00f1) & 1)  ? 0x499 : 0x99;
412 		/* We can have boards other than the built-in!  Verify this is on-board. */
413 		if ((inb(hp_port) & 0xc0) == 0x80
414 			&& ioaddr_table[inb(hp_port) & 3] == ioaddr)
415 			hp_builtin = hp_port;
416 	}
417 	/* We also recognize the HP Vectra on-board here, but check below. */
418 	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
419 				&& inb(ioaddr+2) == 0x09);
420 
421 	/* Reset the LANCE.	 */
422 	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
423 
424 	/* The Un-Reset needed is only needed for the real NE2100, and will
425 	   confuse the HP board. */
426 	if (!hpJ2405A)
427 		outw(reset_val, ioaddr+LANCE_RESET);
428 
429 	outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
430 	if (inw(ioaddr+LANCE_DATA) != 0x0004)
431 		return -ENODEV;
432 
433 	/* Get the version of the chip. */
434 	outw(88, ioaddr+LANCE_ADDR);
435 	if (inw(ioaddr+LANCE_ADDR) != 88) {
436 		lance_version = 0;
437 	} else {							/* Good, it's a newer chip. */
438 		int chip_version = inw(ioaddr+LANCE_DATA);
439 		outw(89, ioaddr+LANCE_ADDR);
440 		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
441 		if (lance_debug > 2)
442 			printk("  LANCE chip version is %#x.\n", chip_version);
443 		if ((chip_version & 0xfff) != 0x003)
444 			return -ENODEV;
445 		chip_version = (chip_version >> 12) & 0xffff;
446 		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
447 			if (chip_table[lance_version].id_number == chip_version)
448 				break;
449 		}
450 	}
451 
452 	/* We can't use init_etherdev() to allocate dev->priv because it must
453 	   a ISA DMA-able region. */
454 	dev = init_etherdev(dev, 0);
455 	if (!dev)
456 		return -ENOMEM;
457 	SET_MODULE_OWNER(dev);
458 	dev->open = lance_open_fail;
459 	chipname = chip_table[lance_version].name;
460 	printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
461 
462 	/* There is a 16 byte station address PROM at the base address.
463 	   The first six bytes are the station address. */
464 	for (i = 0; i < 6; i++)
465 		printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
466 
467 	dev->base_addr = ioaddr;
468 	/* Make certain the data structures used by the LANCE are aligned and DMAble. */
469 
470 	lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
471 					   GFP_DMA | GFP_KERNEL)+7) & ~7);
472 	if(lp==NULL)
473 		return -ENODEV;
474 	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
475 	memset(lp, 0, sizeof(*lp));
476 	dev->priv = lp;
477 	lp->name = chipname;
478 	lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
479 						  GFP_DMA | GFP_KERNEL);
480 	if (!lp->rx_buffs)
481 		goto out_lp;
482 	if (lance_need_isa_bounce_buffers) {
483 		lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
484 						  GFP_DMA | GFP_KERNEL);
485 		if (!lp->tx_bounce_buffs)
486 			goto out_rx;
487 	} else
488 		lp->tx_bounce_buffs = NULL;
489 
490 	lp->chip_version = lance_version;
491 	lp->devlock = SPIN_LOCK_UNLOCKED;
492 
493 	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
494 	for (i = 0; i < 6; i++)
495 		lp->init_block.phys_addr[i] = dev->dev_addr[i];
496 	lp->init_block.filter[0] = 0x00000000;
497 	lp->init_block.filter[1] = 0x00000000;
498 	lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
499 	lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
500 
501 	outw(0x0001, ioaddr+LANCE_ADDR);
502 	inw(ioaddr+LANCE_ADDR);
503 	outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
504 	outw(0x0002, ioaddr+LANCE_ADDR);
505 	inw(ioaddr+LANCE_ADDR);
506 	outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
507 	outw(0x0000, ioaddr+LANCE_ADDR);
508 	inw(ioaddr+LANCE_ADDR);
509 
510 	if (irq) {					/* Set iff PCI card. */
511 		dev->dma = 4;			/* Native bus-master, no DMA channel needed. */
512 		dev->irq = irq;
513 	} else if (hp_builtin) {
514 		static const char dma_tbl[4] = {3, 5, 6, 0};
515 		static const char irq_tbl[4] = {3, 4, 5, 9};
516 		unsigned char port_val = inb(hp_builtin);
517 		dev->dma = dma_tbl[(port_val >> 4) & 3];
518 		dev->irq = irq_tbl[(port_val >> 2) & 3];
519 		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
520 	} else if (hpJ2405A) {
521 		static const char dma_tbl[4] = {3, 5, 6, 7};
522 		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
523 		short reset_val = inw(ioaddr+LANCE_RESET);
524 		dev->dma = dma_tbl[(reset_val >> 2) & 3];
525 		dev->irq = irq_tbl[(reset_val >> 4) & 7];
526 		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
527 	} else if (lance_version == PCNET_ISAP) {		/* The plug-n-play version. */
528 		short bus_info;
529 		outw(8, ioaddr+LANCE_ADDR);
530 		bus_info = inw(ioaddr+LANCE_BUS_IF);
531 		dev->dma = bus_info & 0x07;
532 		dev->irq = (bus_info >> 4) & 0x0F;
533 	} else {
534 		/* The DMA channel may be passed in PARAM1. */
535 		if (dev->mem_start & 0x07)
536 			dev->dma = dev->mem_start & 0x07;
537 	}
538 
539 	if (dev->dma == 0) {
540 		/* Read the DMA channel status register, so that we can avoid
541 		   stuck DMA channels in the DMA detection below. */
542 		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
543 			(inb(DMA2_STAT_REG) & 0xf0);
544 	}
545 	if (dev->irq >= 2)
546 		printk(" assigned IRQ %d", dev->irq);
547 	else if (lance_version != 0)  {	/* 7990 boards need DMA detection first. */
548 		/* To auto-IRQ we enable the initialization-done and DMA error
549 		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
550 		   boards will work. */
551 		autoirq_setup(0);
552 
553 		/* Trigger an initialization just for the interrupt. */
554 		outw(0x0041, ioaddr+LANCE_DATA);
555 
556 		dev->irq = autoirq_report(2);
557 		if (dev->irq)
558 			printk(", probed IRQ %d", dev->irq);
559 		else {
560 			printk(", failed to detect IRQ line.\n");
561 			return -ENODEV;
562 		}
563 
564 		/* Check for the initialization done bit, 0x0100, which means
565 		   that we don't need a DMA channel. */
566 		if (inw(ioaddr+LANCE_DATA) & 0x0100)
567 			dev->dma = 4;
568 	}
569 
570 	if (dev->dma == 4) {
571 		printk(", no DMA needed.\n");
572 	} else if (dev->dma) {
573 		if (request_dma(dev->dma, chipname)) {
574 			printk("DMA %d allocation failed.\n", dev->dma);
575 			return -ENODEV;
576 		} else
577 			printk(", assigned DMA %d.\n", dev->dma);
578 	} else {			/* OK, we have to auto-DMA. */
579 		for (i = 0; i < 4; i++) {
580 			static const char dmas[] = { 5, 6, 7, 3 };
581 			int dma = dmas[i];
582 			int boguscnt;
583 
584 			/* Don't enable a permanently busy DMA channel, or the machine
585 			   will hang. */
586 			if (test_bit(dma, &dma_channels))
587 				continue;
588 			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
589 			if (request_dma(dma, chipname))
590 				continue;
591 
592 			flags=claim_dma_lock();
593 			set_dma_mode(dma, DMA_MODE_CASCADE);
594 			enable_dma(dma);
595 			release_dma_lock(flags);
596 
597 			/* Trigger an initialization. */
598 			outw(0x0001, ioaddr+LANCE_DATA);
599 			for (boguscnt = 100; boguscnt > 0; --boguscnt)
600 				if (inw(ioaddr+LANCE_DATA) & 0x0900)
601 					break;
602 			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
603 				dev->dma = dma;
604 				printk(", DMA %d.\n", dev->dma);
605 				break;
606 			} else {
607 				flags=claim_dma_lock();
608 				disable_dma(dma);
609 				release_dma_lock(flags);
610 				free_dma(dma);
611 			}
612 		}
613 		if (i == 4) {			/* Failure: bail. */
614 			printk("DMA detection failed.\n");
615 			return -ENODEV;
616 		}
617 	}
618 
619 	if (lance_version == 0 && dev->irq == 0) {
620 		/* We may auto-IRQ now that we have a DMA channel. */
621 		/* Trigger an initialization just for the interrupt. */
622 		autoirq_setup(0);
623 		outw(0x0041, ioaddr+LANCE_DATA);
624 
625 		dev->irq = autoirq_report(4);
626 		if (dev->irq == 0) {
627 			printk("  Failed to detect the 7990 IRQ line.\n");
628 			return -ENODEV;
629 		}
630 		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
631 	}
632 
633 	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
634 		/* Turn on auto-select of media (10baseT or BNC) so that the user
635 		   can watch the LEDs even if the board isn't opened. */
636 		outw(0x0002, ioaddr+LANCE_ADDR);
637 		/* Don't touch 10base2 power bit. */
638 		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
639 	}
640 
641 	if (lance_debug > 0  &&  did_version++ == 0)
642 		printk(version);
643 
644 	/* The LANCE-specific entries in the device structure. */
645 	dev->open = lance_open;
646 	dev->hard_start_xmit = lance_start_xmit;
647 	dev->stop = lance_close;
648 	dev->get_stats = lance_get_stats;
649 	dev->set_multicast_list = set_multicast_list;
650 	dev->tx_timeout = lance_tx_timeout;
651 	dev->watchdog_timeo = TX_TIMEOUT;
652 
653 	return 0;
654 out_rx:	kfree((void*)lp->rx_buffs);
655 out_lp:	kfree(lp);
656 	return -ENOMEM;
657 }
658 
/* Stand-in open() installed by lance_probe1() before device setup is
   complete; always refuses to bring the interface up. */
static int
lance_open_fail(struct net_device *dev)
{
	return -ENODEV;
}
664 
665 
666 
/* Bring the interface up: claim the IRQ, put the ISA DMA channel in
   cascade mode, reset and reinitialize the chip from a freshly built
   ring/init block, and start Rx/Tx.  Returns 0 on success, or -EAGAIN
   when no IRQ is configured or request_irq() fails. */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode".
	   dma == 4 means a native bus-master with no ISA DMA channel. */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
 	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
		           (u32) virt_to_bus(lp->tx_ring),
		           (u32) virt_to_bus(lp->rx_ring),
			   (u32) virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done.
	   Load the init-block bus address: low 16 bits into register 1,
	   high bits into register 2. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	/* Write 0x0915 to register 4 -- presumably feature/interrupt-mask
	   bits; confirm against the Am79C9xx databook. */
	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	/* Register 0 = 0x0001: start chip initialization from the init block. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	/* Bounded busy-wait for the init-done bit, 0x0100, in register 0. */
	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	/* 0x0042: start the chip with interrupts enabled (per databook). */
 	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;					/* Always succeed */
}
742 
743 /* The LANCE has been halted for one reason or another (busmaster memory
744    arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
745    etc.).  Modern LANCE variants always reload their ring-buffer
746    configuration when restarted, so we must reinitialize our ring
747    context before restarting.  As part of this reinitialization,
748    find all packets still on the Tx ring and pretend that they had been
749    sent (in effect, drop the packets on the floor) - the higher-level
750    protocols will time out and retransmit.  It'd be better to shuffle
751    these skbs to a temp list and then actually re-Tx them after
752    restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
753 */
754 
755 static void
lance_purge_ring(struct net_device * dev)756 lance_purge_ring(struct net_device *dev)
757 {
758 	struct lance_private *lp = dev->priv;
759 	int i;
760 
761 	/* Free all the skbuffs in the Rx and Tx queues. */
762 	for (i = 0; i < RX_RING_SIZE; i++) {
763 		struct sk_buff *skb = lp->rx_skbuff[i];
764 		lp->rx_skbuff[i] = 0;
765 		lp->rx_ring[i].base = 0;		/* Not owned by LANCE chip. */
766 		if (skb)
767 			dev_kfree_skb_any(skb);
768 	}
769 	for (i = 0; i < TX_RING_SIZE; i++) {
770 		if (lp->tx_skbuff[i]) {
771 			dev_kfree_skb_any(lp->tx_skbuff[i]);
772 			lp->tx_skbuff[i] = NULL;
773 		}
774 	}
775 }
776 
777 
778 /* Initialize the LANCE Rx and Tx rings. */
/* Build fresh Rx/Tx rings and the init block.  'gfp' is GFP_KERNEL from
   lance_open() or GFP_ATOMIC from lance_restart() (IRQ context). */
static void
lance_init_ring(struct net_device *dev, int gfp)
{
	struct lance_private *lp = dev->priv;
	int i;

	/* Reset ring bookkeeping: next-to-use and next-to-reclaim indices. */
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	/* Give each Rx slot a full-sized buffer: preferably an skbuff,
	   falling back to a bare DMA-able buffer if alloc_skb() fails. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb) {
			skb->dev = dev;
			rx_buff = skb->tail;
		} else
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;	/* No memory: leave the slot unowned by the chip. */
		else
			lp->rx_ring[i].base = (u32)virt_to_bus(rx_buff) | 0x80000000;	/* 0x80000000: chip owns the buffer. */
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;	/* Lengths are written 2s-complement. */
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	   the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = 0;
		lp->tx_ring[i].base = 0;
	}

	/* Rebuild the init block: mode 0 (contrast with the 0x0003
	   Rx/Tx-disable used during probe), station address, empty
	   multicast filter, and 24-bit ring addresses with the log2
	   ring-length bits in the top 3 bits. */
	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}
820 
/* Write csr0_bits to register 0 to restart a halted chip.  Chips flagged
   LANCE_MUST_REINIT_RING (or any chip when must_reinit is set) first get
   their rings purged and rebuilt -- pending Tx packets are dropped, as
   described in the comment block above lance_purge_ring(). */
static void
lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
{
	struct lance_private *lp = dev->priv;

	if (must_reinit ||
		(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
		lance_purge_ring(dev);
		lance_init_ring(dev, GFP_ATOMIC);	/* May run in IRQ context: no sleeping allocations. */
	}
	outw(0x0000,    dev->base_addr + LANCE_ADDR);
	outw(csr0_bits, dev->base_addr + LANCE_DATA);
}
834 
835 
lance_tx_timeout(struct net_device * dev)836 static void lance_tx_timeout (struct net_device *dev)
837 {
838 	struct lance_private *lp = (struct lance_private *) dev->priv;
839 	int ioaddr = dev->base_addr;
840 
841 	outw (0, ioaddr + LANCE_ADDR);
842 	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
843 		dev->name, inw (ioaddr + LANCE_DATA));
844 	outw (0x0004, ioaddr + LANCE_DATA);
845 	lp->stats.tx_errors++;
846 #ifndef final_version
847 	if (lance_debug > 3) {
848 		int i;
849 		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
850 		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
851 			lp->cur_rx);
852 		for (i = 0; i < RX_RING_SIZE; i++)
853 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
854 			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
855 				lp->rx_ring[i].msg_length);
856 		for (i = 0; i < TX_RING_SIZE; i++)
857 			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
858 			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
859 				lp->tx_ring[i].misc);
860 		printk ("\n");
861 	}
862 #endif
863 	lance_restart (dev, 0x0043, 1);
864 
865 	dev->trans_start = jiffies;
866 	netif_wake_queue (dev);
867 }
868 
869 
/* Queue one packet for transmission: fill the next Tx descriptor, hand
   it to the chip (ownership bits written last), and poke the chip to
   poll.  Returns 0 (packet consumed) on every path. */
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = dev->priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	/* Ring indices/descriptors are shared with lance_interrupt(). */
	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);	/* select CSR0 */
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if(skb->len < ETH_ZLEN)
		{
			skb = skb_padto(skb, ETH_ZLEN);
			if(skb == NULL)
				/* NOTE(review): assumes skb_padto() frees the skb
				   on failure -- confirm for this kernel version. */
				goto out;
			/* Descriptor lengths are two's-complement negatives. */
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	lp->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)virt_to_bus(skb->data));
		memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
		/* Data copied to the bounce buffer; the skb can go now. */
		lp->tx_ring[entry].base =
			((u32)virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		dev_kfree_skb(skb);
	} else {
		/* Chip DMAs straight from the skb; the Tx-done interrupt
		   handler frees it after completion. */
		lp->tx_skbuff[entry] = skb;
		/* 0x83000000: OWN bit plus (presumably) start/end-of-frame
		   flags -- this write hands the descriptor to the chip. */
		lp->tx_ring[entry].base = ((u32)virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	dev->trans_start = jiffies;

	/* Ring full: stop the queue; reopened from the interrupt handler
	   once descriptors are reclaimed. */
	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return 0;
}
941 
/* The LANCE interrupt handler.  Services Rx/Tx completions and error
   conditions reported through CSR0, restarting the chip when a fatal
   error (Tx FIFO underrun, bus-master failure) is seen. */
static void
lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;	/* boguscnt caps the service loop */
	int must_restart;

	if (dev == NULL) {
		printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	ioaddr = dev->base_addr;
	lp = dev->priv;

	/* Interrupt context: plain spin_lock pairs with the _irqsave users. */
	spin_lock (&lp->devlock);

	outw(0x00, dev->base_addr + LANCE_ADDR);	/* select CSR0 */
	/* Loop while any interrupt-source bit (mask 0x8600) is set, at most
	   10 iterations so a babbling board cannot wedge us here. */
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
		   && --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)			/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			/* Reclaim descriptors the chip has finished with. */
			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				/* base < 0 means bit 31 (OWN) is still set. */
				if (status < 0)
					break;			/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was an major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					lp->stats.tx_errors++;
					if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
					if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
					if (err_status & 0x1000) lp->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						lp->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						lp->stats.collisions++;
					lp->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = 0;
				}
				dirty_tx++;
			}

#ifndef final_version
			/* Sanity check: dirty pointer should never lag a whole
			   ring behind cur_tx. */
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);	/* STOP */
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
}
1065 
/* Receive handler, called from lance_interrupt() with lp->devlock held.
   Walks the Rx ring passing completed packets up the stack; always
   returns 0. */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up.
	   base >= 0 means bit 31 -- the chip's OWN bit -- is clear. */
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;	/* status byte */

		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				lp->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20) lp->stats.rx_frame_errors++;
			if (status & 0x10) lp->stats.rx_over_errors++;
			if (status & 0x08) lp->stats.rx_crc_errors++;
			if (status & 0x04) lp->stats.rx_fifo_errors++;
			/* Clear the status bits but keep the buffer address. */
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3.
			   Low 12 bits are the received length; the -4 strips the
			   trailing frame checksum (presumably FCS -- confirm). */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				lp->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (skb == NULL)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					/* Count how many following entries we still own
					   (base < 0 means the chip owns it). */
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					/* Nearly the whole ring is ours: drop this
					   packet and recycle its descriptor rather
					   than stalling the receiver. */
					if (i > RX_RING_SIZE -2)
					{
						lp->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;	/* give back to chip */
						lp->cur_rx++;
					}
					break;
				}
				skb->dev = dev;
				skb_reserve(skb,2);	/* 16 byte align */
				skb_put(skb,pkt_len);	/* Make room */
				/* Copy out of the DMA buffer (low 24 bits of base). */
				eth_copy_and_sum(skb,
					(unsigned char *)bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len,0);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				lp->stats.rx_packets++;
				lp->stats.rx_bytes+=pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;	/* return descriptor to the chip */
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free.	 If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}
1144 
1145 static int
lance_close(struct net_device * dev)1146 lance_close(struct net_device *dev)
1147 {
1148 	int ioaddr = dev->base_addr;
1149 	struct lance_private *lp = dev->priv;
1150 
1151 	netif_stop_queue (dev);
1152 
1153 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1154 		outw(112, ioaddr+LANCE_ADDR);
1155 		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1156 	}
1157 	outw(0, ioaddr+LANCE_ADDR);
1158 
1159 	if (lance_debug > 1)
1160 		printk("%s: Shutting down ethercard, status was %2.2x.\n",
1161 			   dev->name, inw(ioaddr+LANCE_DATA));
1162 
1163 	/* We stop the LANCE here -- it occasionally polls
1164 	   memory if we don't. */
1165 	outw(0x0004, ioaddr+LANCE_DATA);
1166 
1167 	if (dev->dma != 4)
1168 	{
1169 		unsigned long flags=claim_dma_lock();
1170 		disable_dma(dev->dma);
1171 		release_dma_lock(flags);
1172 	}
1173 	free_irq(dev->irq, dev);
1174 
1175 	lance_purge_ring(dev);
1176 
1177 	return 0;
1178 }
1179 
lance_get_stats(struct net_device * dev)1180 static struct net_device_stats *lance_get_stats(struct net_device *dev)
1181 {
1182 	struct lance_private *lp = dev->priv;
1183 
1184 	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1185 		short ioaddr = dev->base_addr;
1186 		short saved_addr;
1187 		unsigned long flags;
1188 
1189 		spin_lock_irqsave(&lp->devlock, flags);
1190 		saved_addr = inw(ioaddr+LANCE_ADDR);
1191 		outw(112, ioaddr+LANCE_ADDR);
1192 		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1193 		outw(saved_addr, ioaddr+LANCE_ADDR);
1194 		spin_unlock_irqrestore(&lp->devlock, flags);
1195 	}
1196 
1197 	return &lp->stats;
1198 }
1199 
1200 /* Set or clear the multicast filter for this adaptor.
1201  */
1202 
set_multicast_list(struct net_device * dev)1203 static void set_multicast_list(struct net_device *dev)
1204 {
1205 	short ioaddr = dev->base_addr;
1206 
1207 	outw(0, ioaddr+LANCE_ADDR);
1208 	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.	 */
1209 
1210 	if (dev->flags&IFF_PROMISC) {
1211 		/* Log any net taps. */
1212 		printk("%s: Promiscuous mode enabled.\n", dev->name);
1213 		outw(15, ioaddr+LANCE_ADDR);
1214 		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1215 	} else {
1216 		short multicast_table[4];
1217 		int i;
1218 		int num_addrs=dev->mc_count;
1219 		if(dev->flags&IFF_ALLMULTI)
1220 			num_addrs=1;
1221 		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1222 		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1223 		for (i = 0; i < 4; i++) {
1224 			outw(8 + i, ioaddr+LANCE_ADDR);
1225 			outw(multicast_table[i], ioaddr+LANCE_DATA);
1226 		}
1227 		outw(15, ioaddr+LANCE_ADDR);
1228 		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1229 	}
1230 
1231 	lance_restart(dev, 0x0142, 0); /*  Resume normal operation */
1232 
1233 }
1234 
1235