1 /*
2  *	This program is free software; you can redistribute it and/or
3  *	modify it under the terms of the GNU General Public License
4  *	as published by the Free Software Foundation; either version
5  *	2 of the License, or (at your option) any later version.
6  *
7  *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8  *	(c) Copyright 2000, 2001 Red Hat Inc
9  *
10  *	Development of this driver was funded by Equiinet Ltd
11  *			http://www.equiinet.com
12  *
13  *	ChangeLog:
14  *
15  *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16  *	unification of all the Z85x30 asynchronous drivers for real.
17  *
18  *	DMA now uses get_free_page as kmalloc buffers may span a 64K
19  *	boundary.
20  *
21  *	Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
22  *
23  *	Performance
24  *
25  *	Z85230:
26  *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
27  *	X.25 is not unrealistic on all machines. DMA mode can in theory
28  *	handle T1/E1 quite nicely. In practice the limit seems to be about
29  *	512Kbit->1Mbit depending on motherboard.
30  *
31  *	Z85C30:
32  *	64K will take DMA, 9600 baud X.25 should be ok.
33  *
34  *	Z8530:
35  *	Synchronous mode without DMA is unlikely to pass about 2400 baud.
36  */
37 
38 #include <linux/module.h>
39 #include <linux/kernel.h>
40 #include <linux/mm.h>
41 #include <linux/net.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/if_arp.h>
45 #include <linux/delay.h>
46 #include <linux/ioport.h>
47 #include <linux/init.h>
48 #include <asm/dma.h>
49 #include <asm/io.h>
50 #define RT_LOCK
51 #define RT_UNLOCK
52 #include <linux/spinlock.h>
53 
54 #include <net/syncppp.h>
55 #include "z85230.h"
56 
57 
58 /**
59  *	z8530_read_port - Architecture specific interface function
60  *	@p: port to read
61  *
62  *	Provided port access methods. The Comtrol SV11 requires no delays
63  *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
64  *
65  *	In the longer term this should become an architecture specific
66  *	section so that this can become a generic driver interface for all
67  *	platforms. For now we only handle PC I/O ports with or without the
68  *	dread 5uS sanity delay.
69  *
70  *	The caller must hold sufficient locks to avoid violating the horrible
71  *	5uS delay rule.
72  */
73 
z8530_read_port(unsigned long p)74 static inline int z8530_read_port(unsigned long p)
75 {
76 	u8 r=inb(Z8530_PORT_OF(p));
77 	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
78 		udelay(5);
79 	return r;
80 }
81 
82 /**
83  *	z8530_write_port - Architecture specific interface function
84  *	@p: port to write
85  *	@d: value to write
86  *
87  *	Write a value to a port with delays if need be. Note that the
88  *	caller must hold locks to avoid read/writes from other contexts
89  *	violating the 5uS rule
90  *
91  *	In the longer term this should become an architecture specific
92  *	section so that this can become a generic driver interface for all
93  *	platforms. For now we only handle PC I/O ports with or without the
94  *	dread 5uS sanity delay.
95  */
96 
97 
z8530_write_port(unsigned long p,u8 d)98 static inline void z8530_write_port(unsigned long p, u8 d)
99 {
100 	outb(d,Z8530_PORT_OF(p));
101 	if(p&Z8530_PORT_SLEEP)
102 		udelay(5);
103 }
104 
105 
106 
107 static void z8530_rx_done(struct z8530_channel *c);
108 static void z8530_tx_done(struct z8530_channel *c);
109 
110 
111 /**
112  *	read_zsreg - Read a register from a Z85230
113  *	@c: Z8530 channel to read from (2 per chip)
114  *	@reg: Register to read
115  *	FIXME: Use a spinlock.
116  *
117  *	Most of the Z8530 registers are indexed off the control registers.
118  *	A read is done by writing to the control register and reading the
119  *	register back.  The caller must hold the lock
120  */
121 
read_zsreg(struct z8530_channel * c,u8 reg)122 static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
123 {
124 	if(reg)
125 		z8530_write_port(c->ctrlio, reg);
126 	return z8530_read_port(c->ctrlio);
127 }
128 
129 /**
130  *	read_zsdata - Read the data port of a Z8530 channel
131  *	@c: The Z8530 channel to read the data port from
132  *
133  *	The data port provides fast access to some things. We still
134  *	have all the 5uS delays to worry about.
135  */
136 
read_zsdata(struct z8530_channel * c)137 static inline u8 read_zsdata(struct z8530_channel *c)
138 {
139 	u8 r;
140 	r=z8530_read_port(c->dataio);
141 	return r;
142 }
143 
144 /**
145  *	write_zsreg - Write to a Z8530 channel register
146  *	@c: The Z8530 channel
147  *	@reg: Register number
148  *	@val: Value to write
149  *
150  *	Write a value to an indexed register. The caller must hold the lock
151  *	to honour the irritating delay rules. We know about register 0
152  *	being fast to access.
153  */
154 
write_zsreg(struct z8530_channel * c,u8 reg,u8 val)155 static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
156 {
157 	unsigned long flags;
158 
159 	spin_lock_irqsave(c->lock, flags);
160 
161 	if(reg)
162 		z8530_write_port(c->ctrlio, reg);
163 	z8530_write_port(c->ctrlio, val);
164 
165 	spin_unlock_irqrestore(c->lock, flags);
166 }
167 
168 /**
169  *	write_zsctrl - Write to a Z8530 control register
170  *	@c: The Z8530 channel
171  *	@val: Value to write
172  *
173  *	Write directly to the control register on the Z8530
174  */
175 
write_zsctrl(struct z8530_channel * c,u8 val)176 static inline void write_zsctrl(struct z8530_channel *c, u8 val)
177 {
178 	z8530_write_port(c->ctrlio, val);
179 }
180 
181 /**
182  *	write_zsdata - Write to a Z8530 control register
183  *	@c: The Z8530 channel
184  *	@val: Value to write
185  *
186  *	Write directly to the data register on the Z8530
187  */
188 
189 
write_zsdata(struct z8530_channel * c,u8 val)190 static inline void write_zsdata(struct z8530_channel *c, u8 val)
191 {
192 	z8530_write_port(c->dataio, val);
193 }
194 
195 /*
196  *	Register loading parameters for a dead port
197  */
198 
199 u8 z8530_dead_port[]=
200 {
201 	255
202 };
203 
204 EXPORT_SYMBOL(z8530_dead_port);
205 
206 /*
207  *	Register loading parameters for currently supported circuit types
208  */
209 
210 
211 /*
212  *	Data clocked by telco end. This is the correct data for the UK
213  *	"kilostream" service, and most other similar services.
214  */
215 
216 u8 z8530_hdlc_kilostream[]=
217 {
218 	4,	SYNC_ENAB|SDLC|X1CLK,
219 	2,	0,	/* No vector */
220 	1,	0,
221 	3,	ENT_HM|RxCRC_ENAB|Rx8,
222 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
223 	9,	0,		/* Disable interrupts */
224 	6,	0xFF,
225 	7,	FLAG,
226 	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
227 	11,	TCTRxCP,
228 	14,	DISDPLL,
229 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
230 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
231 	9,	NV|MIE|NORESET,
232 	255
233 };
234 
235 EXPORT_SYMBOL(z8530_hdlc_kilostream);
236 
237 /*
238  *	As above but for enhanced chips.
239  */
240 
241 u8 z8530_hdlc_kilostream_85230[]=
242 {
243 	4,	SYNC_ENAB|SDLC|X1CLK,
244 	2,	0,	/* No vector */
245 	1,	0,
246 	3,	ENT_HM|RxCRC_ENAB|Rx8,
247 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
248 	9,	0,		/* Disable interrupts */
249 	6,	0xFF,
250 	7,	FLAG,
251 	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
252 	11,	TCTRxCP,
253 	14,	DISDPLL,
254 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
255 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
256 	9,	NV|MIE|NORESET,
257 	23,	3,		/* Extended mode AUTO TX and EOM*/
258 
259 	255
260 };
261 
262 EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
263 
264 /**
265  *	z8530_flush_fifo - Flush on chip RX FIFO
266  *	@c: Channel to flush
267  *
268  *	Flush the receive FIFO. There is no specific option for this, we
269  *	blindly read bytes and discard them. Reading when there is no data
270  *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
271  *
272  *	All locking is handled for the caller. On return data may still be
273  *	present if it arrived during the flush.
274  */
275 
z8530_flush_fifo(struct z8530_channel * c)276 static void z8530_flush_fifo(struct z8530_channel *c)
277 {
278 	read_zsreg(c, R1);
279 	read_zsreg(c, R1);
280 	read_zsreg(c, R1);
281 	read_zsreg(c, R1);
282 	if(c->dev->type==Z85230)
283 	{
284 		read_zsreg(c, R1);
285 		read_zsreg(c, R1);
286 		read_zsreg(c, R1);
287 		read_zsreg(c, R1);
288 	}
289 }
290 
291 /**
292  *	z8530_rtsdtr - Control the outgoing DTS/RTS line
293  *	@c: The Z8530 channel to control;
294  *	@set: 1 to set, 0 to clear
295  *
296  *	Sets or clears DTR/RTS on the requested line. All locking is handled
297  *	by the caller. For now we assume all boards use the actual RTS/DTR
298  *	on the chip. Apparently one or two don't. We'll scream about them
299  *	later.
300  */
301 
z8530_rtsdtr(struct z8530_channel * c,int set)302 static void z8530_rtsdtr(struct z8530_channel *c, int set)
303 {
304 	if (set)
305 		c->regs[5] |= (RTS | DTR);
306 	else
307 		c->regs[5] &= ~(RTS | DTR);
308 	write_zsreg(c, R5, c->regs[5]);
309 }
310 
311 /**
312  *	z8530_rx - Handle a PIO receive event
313  *	@c: Z8530 channel to process
314  *
315  *	Receive handler for receiving in PIO mode. This is much like the
316  *	async one but not quite the same or as complex
317  *
318  *	Note: Its intended that this handler can easily be separated from
319  *	the main code to run realtime. That'll be needed for some machines
320  *	(eg to ever clock 64kbits on a sparc ;)).
321  *
322  *	The RT_LOCK macros don't do anything now. Keep the code covered
323  *	by them as short as possible in all circumstances - clocks cost
324  *	baud. The interrupt handler is assumed to be atomic w.r.t. to
325  *	other code - this is true in the RT case too.
326  *
327  *	We only cover the sync cases for this. If you want 2Mbit async
328  *	do it yourself but consider medical assistance first. This non DMA
329  *	synchronous mode is portable code. The DMA mode assumes PCI like
330  *	ISA DMA
331  *
332  *	Called with the device lock held
333  */
334 
z8530_rx(struct z8530_channel * c)335 static void z8530_rx(struct z8530_channel *c)
336 {
337 	u8 ch,stat;
338 
339 	while(1)
340 	{
341 		/* FIFO empty ? */
342 		if(!(read_zsreg(c, R0)&1))
343 			break;
344 		ch=read_zsdata(c);
345 		stat=read_zsreg(c, R1);
346 
347 		/*
348 		 *	Overrun ?
349 		 */
350 		if(c->count < c->max)
351 		{
352 			*c->dptr++=ch;
353 			c->count++;
354 		}
355 
356 		if(stat&END_FR)
357 		{
358 
359 			/*
360 			 *	Error ?
361 			 */
362 			if(stat&(Rx_OVR|CRC_ERR))
363 			{
364 				/* Rewind the buffer and return */
365 				if(c->skb)
366 					c->dptr=c->skb->data;
367 				c->count=0;
368 				if(stat&Rx_OVR)
369 				{
370 					printk(KERN_WARNING "%s: overrun\n", c->dev->name);
371 					c->rx_overrun++;
372 				}
373 				if(stat&CRC_ERR)
374 				{
375 					c->rx_crc_err++;
376 					/* printk("crc error\n"); */
377 				}
378 				/* Shove the frame upstream */
379 			}
380 			else
381 			{
382 				/*
383 				 *	Drop the lock for RX processing, or
384 		 		 *	there are deadlocks
385 		 		 */
386 				z8530_rx_done(c);
387 				write_zsctrl(c, RES_Rx_CRC);
388 			}
389 		}
390 	}
391 	/*
392 	 *	Clear irq
393 	 */
394 	write_zsctrl(c, ERR_RES);
395 	write_zsctrl(c, RES_H_IUS);
396 }
397 
398 
399 /**
400  *	z8530_tx - Handle a PIO transmit event
401  *	@c: Z8530 channel to process
402  *
403  *	Z8530 transmit interrupt handler for the PIO mode. The basic
404  *	idea is to attempt to keep the FIFO fed. We fill as many bytes
405  *	in as possible, its quite possible that we won't keep up with the
406  *	data rate otherwise.
407  */
408 
z8530_tx(struct z8530_channel * c)409 static void z8530_tx(struct z8530_channel *c)
410 {
411 	while(c->txcount) {
412 		/* FIFO full ? */
413 		if(!(read_zsreg(c, R0)&4))
414 			break;
415 		c->txcount--;
416 		/*
417 		 *	Shovel out the byte
418 		 */
419 		write_zsreg(c, R8, *c->tx_ptr++);
420 		write_zsctrl(c, RES_H_IUS);
421 		/* We are about to underflow */
422 		if(c->txcount==0)
423 		{
424 			write_zsctrl(c, RES_EOM_L);
425 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
426 		}
427 	}
428 
429 
430 	/*
431 	 *	End of frame TX - fire another one
432 	 */
433 
434 	write_zsctrl(c, RES_Tx_P);
435 
436 	z8530_tx_done(c);
437 	write_zsctrl(c, RES_H_IUS);
438 }
439 
440 /**
441  *	z8530_status - Handle a PIO status exception
442  *	@chan: Z8530 channel to process
443  *
444  *	A status event occurred in PIO synchronous mode. There are several
445  *	reasons the chip will bother us here. A transmit underrun means we
446  *	failed to feed the chip fast enough and just broke a packet. A DCD
447  *	change is a line up or down. We communicate that back to the protocol
448  *	layer for synchronous PPP to renegotiate.
449  */
450 
z8530_status(struct z8530_channel * chan)451 static void z8530_status(struct z8530_channel *chan)
452 {
453 	u8 status, altered;
454 
455 	status=read_zsreg(chan, R0);
456 	altered=chan->status^status;
457 
458 	chan->status=status;
459 
460 	if(status&TxEOM)
461 	{
462 /*		printk("%s: Tx underrun.\n", chan->dev->name); */
463 		chan->stats.tx_fifo_errors++;
464 		write_zsctrl(chan, ERR_RES);
465 		z8530_tx_done(chan);
466 	}
467 
468 	if(altered&chan->dcdcheck)
469 	{
470 		if(status&chan->dcdcheck)
471 		{
472 			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
473 			write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
474 			if(chan->netdevice &&
475 			    ((chan->netdevice->type == ARPHRD_HDLC) ||
476 			    (chan->netdevice->type == ARPHRD_PPP)))
477 				sppp_reopen(chan->netdevice);
478 		}
479 		else
480 		{
481 			printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
482 			write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
483 			z8530_flush_fifo(chan);
484 		}
485 
486 	}
487 	write_zsctrl(chan, RES_EXT_INT);
488 	write_zsctrl(chan, RES_H_IUS);
489 }
490 
491 struct z8530_irqhandler z8530_sync=
492 {
493 	z8530_rx,
494 	z8530_tx,
495 	z8530_status
496 };
497 
498 EXPORT_SYMBOL(z8530_sync);
499 
500 /**
501  *	z8530_dma_rx - Handle a DMA RX event
502  *	@chan: Channel to handle
503  *
504  *	Non bus mastering DMA interfaces for the Z8x30 devices. This
505  *	is really pretty PC specific. The DMA mode means that most receive
506  *	events are handled by the DMA hardware. We get a kick here only if
507  *	a frame ended.
508  */
509 
z8530_dma_rx(struct z8530_channel * chan)510 static void z8530_dma_rx(struct z8530_channel *chan)
511 {
512 	if(chan->rxdma_on)
513 	{
514 		/* Special condition check only */
515 		u8 status;
516 
517 		read_zsreg(chan, R7);
518 		read_zsreg(chan, R6);
519 
520 		status=read_zsreg(chan, R1);
521 
522 		if(status&END_FR)
523 		{
524 			z8530_rx_done(chan);	/* Fire up the next one */
525 		}
526 		write_zsctrl(chan, ERR_RES);
527 		write_zsctrl(chan, RES_H_IUS);
528 	}
529 	else
530 	{
531 		/* DMA is off right now, drain the slow way */
532 		z8530_rx(chan);
533 	}
534 }
535 
536 /**
537  *	z8530_dma_tx - Handle a DMA TX event
538  *	@chan:	The Z8530 channel to handle
539  *
540  *	We have received an interrupt while doing DMA transmissions. It
541  *	shouldn't happen. Scream loudly if it does.
542  */
543 
z8530_dma_tx(struct z8530_channel * chan)544 static void z8530_dma_tx(struct z8530_channel *chan)
545 {
546 	if(!chan->dma_tx)
547 	{
548 		printk(KERN_WARNING "Hey who turned the DMA off?\n");
549 		z8530_tx(chan);
550 		return;
551 	}
552 	/* This shouldnt occur in DMA mode */
553 	printk(KERN_ERR "DMA tx - bogus event!\n");
554 	z8530_tx(chan);
555 }
556 
557 /**
558  *	z8530_dma_status - Handle a DMA status exception
559  *	@chan: Z8530 channel to process
560  *
561  *	A status event occurred on the Z8530. We receive these for two reasons
562  *	when in DMA mode. Firstly if we finished a packet transfer we get one
563  *	and kick the next packet out. Secondly we may see a DCD change and
564  *	have to poke the protocol layer.
565  *
566  */
567 
z8530_dma_status(struct z8530_channel * chan)568 static void z8530_dma_status(struct z8530_channel *chan)
569 {
570 	u8 status, altered;
571 
572 	status=read_zsreg(chan, R0);
573 	altered=chan->status^status;
574 
575 	chan->status=status;
576 
577 
578 	if(chan->dma_tx)
579 	{
580 		if(status&TxEOM)
581 		{
582 			unsigned long flags;
583 
584 			flags=claim_dma_lock();
585 			disable_dma(chan->txdma);
586 			clear_dma_ff(chan->txdma);
587 			chan->txdma_on=0;
588 			release_dma_lock(flags);
589 			z8530_tx_done(chan);
590 		}
591 	}
592 	if(altered&chan->dcdcheck)
593 	{
594 		if(status&chan->dcdcheck)
595 		{
596 			printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
597 			write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
598 			if(chan->netdevice &&
599 			    ((chan->netdevice->type == ARPHRD_HDLC) ||
600 			    (chan->netdevice->type == ARPHRD_PPP)))
601 				sppp_reopen(chan->netdevice);
602 		}
603 		else
604 		{
605 			printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
606 			write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
607 			z8530_flush_fifo(chan);
608 		}
609 	}
610 	write_zsctrl(chan, RES_EXT_INT);
611 	write_zsctrl(chan, RES_H_IUS);
612 }
613 
614 struct z8530_irqhandler z8530_dma_sync=
615 {
616 	z8530_dma_rx,
617 	z8530_dma_tx,
618 	z8530_dma_status
619 };
620 
621 EXPORT_SYMBOL(z8530_dma_sync);
622 
623 struct z8530_irqhandler z8530_txdma_sync=
624 {
625 	z8530_rx,
626 	z8530_dma_tx,
627 	z8530_dma_status
628 };
629 
630 EXPORT_SYMBOL(z8530_txdma_sync);
631 
632 /**
633  *	z8530_rx_clear - Handle RX events from a stopped chip
634  *	@c: Z8530 channel to shut up
635  *
636  *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
637  *	For machines with PCI Z85x30 cards, or level triggered interrupts
638  *	(eg the MacII) we must clear the interrupt cause or die.
639  */
640 
641 
z8530_rx_clear(struct z8530_channel * c)642 static void z8530_rx_clear(struct z8530_channel *c)
643 {
644 	/*
645 	 *	Data and status bytes
646 	 */
647 	u8 stat;
648 
649 	read_zsdata(c);
650 	stat=read_zsreg(c, R1);
651 
652 	if(stat&END_FR)
653 		write_zsctrl(c, RES_Rx_CRC);
654 	/*
655 	 *	Clear irq
656 	 */
657 	write_zsctrl(c, ERR_RES);
658 	write_zsctrl(c, RES_H_IUS);
659 }
660 
661 /**
662  *	z8530_tx_clear - Handle TX events from a stopped chip
663  *	@c: Z8530 channel to shut up
664  *
665  *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
666  *	For machines with PCI Z85x30 cards, or level triggered interrupts
667  *	(eg the MacII) we must clear the interrupt cause or die.
668  */
669 
z8530_tx_clear(struct z8530_channel * c)670 static void z8530_tx_clear(struct z8530_channel *c)
671 {
672 	write_zsctrl(c, RES_Tx_P);
673 	write_zsctrl(c, RES_H_IUS);
674 }
675 
676 /**
677  *	z8530_status_clear - Handle status events from a stopped chip
678  *	@chan: Z8530 channel to shut up
679  *
680  *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
681  *	For machines with PCI Z85x30 cards, or level triggered interrupts
682  *	(eg the MacII) we must clear the interrupt cause or die.
683  */
684 
z8530_status_clear(struct z8530_channel * chan)685 static void z8530_status_clear(struct z8530_channel *chan)
686 {
687 	u8 status=read_zsreg(chan, R0);
688 	if(status&TxEOM)
689 		write_zsctrl(chan, ERR_RES);
690 	write_zsctrl(chan, RES_EXT_INT);
691 	write_zsctrl(chan, RES_H_IUS);
692 }
693 
694 struct z8530_irqhandler z8530_nop=
695 {
696 	z8530_rx_clear,
697 	z8530_tx_clear,
698 	z8530_status_clear
699 };
700 
701 
702 EXPORT_SYMBOL(z8530_nop);
703 
704 /**
705  *	z8530_interrupt - Handle an interrupt from a Z8530
706  *	@irq: 	Interrupt number
707  *	@dev_id: The Z8530 device that is interrupting.
708  *	@regs: unused
709  *
710  *	A Z85[2]30 device has stuck its hand in the air for attention.
711  *	We scan both the channels on the chip for events and then call
712  *	the channel specific call backs for each channel that has events.
713  *	We have to use callback functions because the two channels can be
714  *	in different modes.
715  *
716  *	Locking is done for the handlers. Note that locking is done
717  *	at the chip level (the 5uS delay issue is per chip not per
718  *	channel). c->lock for both channels points to dev->lock
719  */
720 
z8530_interrupt(int irq,void * dev_id,struct pt_regs * regs)721 void z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
722 {
723 	struct z8530_dev *dev=dev_id;
724 	u8 intr;
725 	static volatile int locker=0;
726 	int work=0;
727 	struct z8530_irqhandler *irqs=dev->chanA.irqs;
728 
729 	if(locker)
730 	{
731 		printk(KERN_ERR "IRQ re-enter\n");
732 		return;
733 	}
734 	locker=1;
735 
736 	spin_lock(&dev->lock);
737 
738 	while(++work<5000)
739 	{
740 
741 		intr = read_zsreg(&dev->chanA, R3);
742 		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
743 			break;
744 
745 		/* This holds the IRQ status. On the 8530 you must read it from chan
746 		   A even though it applies to the whole chip */
747 
748 		/* Now walk the chip and see what it is wanting - it may be
749 		   an IRQ for someone else remember */
750 
751 		if(intr & (CHARxIP|CHATxIP|CHAEXT))
752 		{
753 			if(intr&CHARxIP)
754 				irqs->rx(&dev->chanA);
755 			if(intr&CHATxIP)
756 				irqs->tx(&dev->chanA);
757 			if(intr&CHAEXT)
758 				irqs->status(&dev->chanA);
759 		}
760 
761 		irqs=dev->chanB.irqs;
762 
763 		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
764 		{
765 			if(intr&CHBRxIP)
766 				irqs->rx(&dev->chanB);
767 			if(intr&CHBTxIP)
768 				irqs->tx(&dev->chanB);
769 			if(intr&CHBEXT)
770 				irqs->status(&dev->chanB);
771 		}
772 	}
773 	spin_unlock(&dev->lock);
774 	if(work==5000)
775 		printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
776 	/* Ok all done */
777 	locker=0;
778 }
779 
780 EXPORT_SYMBOL(z8530_interrupt);
781 
782 static char reg_init[16]=
783 {
784 	0,0,0,0,
785 	0,0,0,0,
786 	0,0,0,0,
787 	0x55,0,0,0
788 };
789 
790 
791 /**
792  *	z8530_sync_open - Open a Z8530 channel for PIO
793  *	@dev:	The network interface we are using
794  *	@c:	The Z8530 channel to open in synchronous PIO mode
795  *
796  *	Switch a Z8530 into synchronous mode without DMA assist. We
797  *	raise the RTS/DTR and commence network operation.
798  */
799 
z8530_sync_open(struct net_device * dev,struct z8530_channel * c)800 int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
801 {
802 	unsigned long flags;
803 
804 	spin_lock_irqsave(c->lock, flags);
805 
806 	c->sync = 1;
807 	c->mtu = dev->mtu+64;
808 	c->count = 0;
809 	c->skb = NULL;
810 	c->skb2 = NULL;
811 	c->irqs = &z8530_sync;
812 
813 	/* This loads the double buffer up */
814 	z8530_rx_done(c);	/* Load the frame ring */
815 	z8530_rx_done(c);	/* Load the backup frame */
816 	z8530_rtsdtr(c,1);
817 	c->dma_tx = 0;
818 	c->regs[R1]|=TxINT_ENAB;
819 	write_zsreg(c, R1, c->regs[R1]);
820 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
821 
822 	spin_unlock_irqrestore(c->lock, flags);
823 	return 0;
824 }
825 
826 
827 EXPORT_SYMBOL(z8530_sync_open);
828 
829 /**
830  *	z8530_sync_close - Close a PIO Z8530 channel
831  *	@dev: Network device to close
832  *	@c: Z8530 channel to disassociate and move to idle
833  *
834  *	Close down a Z8530 interface and switch its interrupt handlers
835  *	to discard future events.
836  */
837 
z8530_sync_close(struct net_device * dev,struct z8530_channel * c)838 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
839 {
840 	u8 chk;
841 	unsigned long flags;
842 
843 	spin_lock_irqsave(c->lock, flags);
844 	c->irqs = &z8530_nop;
845 	c->max = 0;
846 	c->sync = 0;
847 
848 	chk=read_zsreg(c,R0);
849 	write_zsreg(c, R3, c->regs[R3]);
850 	z8530_rtsdtr(c,0);
851 
852 	spin_unlock_irqrestore(c->lock, flags);
853 	return 0;
854 }
855 
856 EXPORT_SYMBOL(z8530_sync_close);
857 
858 /**
859  *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
860  *	@dev: The network device to attach
861  *	@c: The Z8530 channel to configure in sync DMA mode.
862  *
863  *	Set up a Z85x30 device for synchronous DMA in both directions. Two
864  *	ISA DMA channels must be available for this to work. We assume ISA
865  *	DMA driven I/O and PC limits on access.
866  */
867 
z8530_sync_dma_open(struct net_device * dev,struct z8530_channel * c)868 int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
869 {
870 	unsigned long flags;
871 
872 	c->sync = 1;
873 	c->mtu = dev->mtu+64;
874 	c->count = 0;
875 	c->skb = NULL;
876 	c->skb2 = NULL;
877 	/*
878 	 *	Load the DMA interfaces up
879 	 */
880 	c->rxdma_on = 0;
881 	c->txdma_on = 0;
882 
883 	/*
884 	 *	Allocate the DMA flip buffers. Limit by page size.
885 	 *	Everyone runs 1500 mtu or less on wan links so this
886 	 *	should be fine.
887 	 */
888 
889 	if(c->mtu  > PAGE_SIZE/2)
890 		return -EMSGSIZE;
891 
892 	c->rx_buf[0]=(void *)get_free_page(GFP_KERNEL|GFP_DMA);
893 	if(c->rx_buf[0]==NULL)
894 		return -ENOBUFS;
895 	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
896 
897 	c->tx_dma_buf[0]=(void *)get_free_page(GFP_KERNEL|GFP_DMA);
898 	if(c->tx_dma_buf[0]==NULL)
899 	{
900 		free_page((unsigned long)c->rx_buf[0]);
901 		c->rx_buf[0]=NULL;
902 		return -ENOBUFS;
903 	}
904 	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
905 
906 	c->tx_dma_used=0;
907 	c->dma_tx = 1;
908 	c->dma_num=0;
909 	c->dma_ready=1;
910 
911 	/*
912 	 *	Enable DMA control mode
913 	 */
914 
915 	spin_lock_irqsave(c->lock, flags);
916 
917 	/*
918 	 *	TX DMA via DIR/REQ
919 	 */
920 
921 	c->regs[R14]|= DTRREQ;
922 	write_zsreg(c, R14, c->regs[R14]);
923 
924 	c->regs[R1]&= ~TxINT_ENAB;
925 	write_zsreg(c, R1, c->regs[R1]);
926 
927 	/*
928 	 *	RX DMA via W/Req
929 	 */
930 
931 	c->regs[R1]|= WT_FN_RDYFN;
932 	c->regs[R1]|= WT_RDY_RT;
933 	c->regs[R1]|= INT_ERR_Rx;
934 	c->regs[R1]&= ~TxINT_ENAB;
935 	write_zsreg(c, R1, c->regs[R1]);
936 	c->regs[R1]|= WT_RDY_ENAB;
937 	write_zsreg(c, R1, c->regs[R1]);
938 
939 	/*
940 	 *	DMA interrupts
941 	 */
942 
943 	/*
944 	 *	Set up the DMA configuration
945 	 */
946 
947 	flags=claim_dma_lock();
948 
949 	disable_dma(c->rxdma);
950 	clear_dma_ff(c->rxdma);
951 	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
952 	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
953 	set_dma_count(c->rxdma, c->mtu);
954 	enable_dma(c->rxdma);
955 
956 	disable_dma(c->txdma);
957 	clear_dma_ff(c->txdma);
958 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
959 	disable_dma(c->txdma);
960 
961 	release_dma_lock(flags);
962 
963 	/*
964 	 *	Select the DMA interrupt handlers
965 	 */
966 
967 	c->rxdma_on = 1;
968 	c->txdma_on = 1;
969 	c->tx_dma_used = 1;
970 
971 	c->irqs = &z8530_dma_sync;
972 	z8530_rtsdtr(c,1);
973 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
974 
975 	spin_unlock_irqrestore(c->lock, flags);
976 
977 	return 0;
978 }
979 
980 EXPORT_SYMBOL(z8530_sync_dma_open);
981 
982 /**
983  *	z8530_sync_dma_close - Close down DMA I/O
984  *	@dev: Network device to detach
985  *	@c: Z8530 channel to move into discard mode
986  *
987  *	Shut down a DMA mode synchronous interface. Halt the DMA, and
988  *	free the buffers.
989  */
990 
z8530_sync_dma_close(struct net_device * dev,struct z8530_channel * c)991 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
992 {
993 	u8 chk;
994 	unsigned long flags;
995 
996 	c->irqs = &z8530_nop;
997 	c->max = 0;
998 	c->sync = 0;
999 
1000 	/*
1001 	 *	Disable the PC DMA channels
1002 	 */
1003 
1004 	flags=claim_dma_lock();
1005 	disable_dma(c->rxdma);
1006 	clear_dma_ff(c->rxdma);
1007 
1008 	c->rxdma_on = 0;
1009 
1010 	disable_dma(c->txdma);
1011 	clear_dma_ff(c->txdma);
1012 	release_dma_lock(flags);
1013 
1014 	c->txdma_on = 0;
1015 	c->tx_dma_used = 0;
1016 
1017 	spin_lock_irqsave(c->lock, flags);
1018 
1019 	/*
1020 	 *	Disable DMA control mode
1021 	 */
1022 
1023 	c->regs[R1]&= ~WT_RDY_ENAB;
1024 	write_zsreg(c, R1, c->regs[R1]);
1025 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1026 	c->regs[R1]|= INT_ALL_Rx;
1027 	write_zsreg(c, R1, c->regs[R1]);
1028 	c->regs[R14]&= ~DTRREQ;
1029 	write_zsreg(c, R14, c->regs[R14]);
1030 
1031 	if(c->rx_buf[0])
1032 	{
1033 		free_page((unsigned long)c->rx_buf[0]);
1034 		c->rx_buf[0]=NULL;
1035 	}
1036 	if(c->tx_dma_buf[0])
1037 	{
1038 		free_page((unsigned  long)c->tx_dma_buf[0]);
1039 		c->tx_dma_buf[0]=NULL;
1040 	}
1041 	chk=read_zsreg(c,R0);
1042 	write_zsreg(c, R3, c->regs[R3]);
1043 	z8530_rtsdtr(c,0);
1044 
1045 	spin_unlock_irqrestore(c->lock, flags);
1046 
1047 	return 0;
1048 }
1049 
1050 EXPORT_SYMBOL(z8530_sync_dma_close);
1051 
1052 /**
1053  *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1054  *	@dev: The network device to attach
1055  *	@c: The Z8530 channel to configure in sync DMA mode.
1056  *
1057  *	Set up a Z85x30 device for synchronous DMA tranmission. One
1058  *	ISA DMA channel must be available for this to work. The receive
1059  *	side is run in PIO mode, but then it has the bigger FIFO.
1060  */
1061 
z8530_sync_txdma_open(struct net_device * dev,struct z8530_channel * c)1062 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1063 {
1064 	unsigned long flags;
1065 
1066 	printk("Opening sync interface for TX-DMA\n");
1067 	c->sync = 1;
1068 	c->mtu = dev->mtu+64;
1069 	c->count = 0;
1070 	c->skb = NULL;
1071 	c->skb2 = NULL;
1072 
1073 	/*
1074 	 *	Allocate the DMA flip buffers. Limit by page size.
1075 	 *	Everyone runs 1500 mtu or less on wan links so this
1076 	 *	should be fine.
1077 	 */
1078 
1079 	if(c->mtu  > PAGE_SIZE/2)
1080 		return -EMSGSIZE;
1081 
1082 	c->tx_dma_buf[0]=(void *)get_free_page(GFP_KERNEL|GFP_DMA);
1083 	if(c->tx_dma_buf[0]==NULL)
1084 		return -ENOBUFS;
1085 
1086 	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1087 
1088 
1089 	spin_lock_irqsave(c->lock, flags);
1090 
1091 	/*
1092 	 *	Load the PIO receive ring
1093 	 */
1094 
1095 	z8530_rx_done(c);
1096 	z8530_rx_done(c);
1097 
1098  	/*
1099 	 *	Load the DMA interfaces up
1100 	 */
1101 
1102 	c->rxdma_on = 0;
1103 	c->txdma_on = 0;
1104 
1105 	c->tx_dma_used=0;
1106 	c->dma_num=0;
1107 	c->dma_ready=1;
1108 	c->dma_tx = 1;
1109 
1110  	/*
1111 	 *	Enable DMA control mode
1112 	 */
1113 
1114  	/*
1115 	 *	TX DMA via DIR/REQ
1116  	 */
1117 	c->regs[R14]|= DTRREQ;
1118 	write_zsreg(c, R14, c->regs[R14]);
1119 
1120 	c->regs[R1]&= ~TxINT_ENAB;
1121 	write_zsreg(c, R1, c->regs[R1]);
1122 
1123 	/*
1124 	 *	Set up the DMA configuration
1125 	 */
1126 
1127 	flags = claim_dma_lock();
1128 
1129 	disable_dma(c->txdma);
1130 	clear_dma_ff(c->txdma);
1131 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
1132 	disable_dma(c->txdma);
1133 
1134 	release_dma_lock(flags);
1135 
1136 	/*
1137 	 *	Select the DMA interrupt handlers
1138 	 */
1139 
1140 	c->rxdma_on = 0;
1141 	c->txdma_on = 1;
1142 	c->tx_dma_used = 1;
1143 
1144 	c->irqs = &z8530_txdma_sync;
1145 	z8530_rtsdtr(c,1);
1146 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1147 	spin_unlock_irqrestore(c->lock, flags);
1148 
1149 	return 0;
1150 }
1151 
1152 EXPORT_SYMBOL(z8530_sync_txdma_open);
1153 
1154 /**
1155  *	z8530_sync_txdma_close - Close down a TX driven DMA channel
1156  *	@dev: Network device to detach
1157  *	@c: Z8530 channel to move into discard mode
1158  *
1159  *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1160  *	and  free the buffers.
1161  */
1162 
z8530_sync_txdma_close(struct net_device * dev,struct z8530_channel * c)1163 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1164 {
1165 	unsigned long flags;
1166 	u8 chk;
1167 
1168 
1169 	spin_lock_irqsave(c->lock, flags);
1170 
1171 	c->irqs = &z8530_nop;
1172 	c->max = 0;
1173 	c->sync = 0;
1174 
1175 	/*
1176 	 *	Disable the PC DMA channels
1177 	 */
1178 
1179 	flags = claim_dma_lock();
1180 
1181 	disable_dma(c->txdma);
1182 	clear_dma_ff(c->txdma);
1183 	c->txdma_on = 0;
1184 	c->tx_dma_used = 0;
1185 
1186 	release_dma_lock(flags);
1187 
1188 	/*
1189 	 *	Disable DMA control mode
1190 	 */
1191 
1192 	c->regs[R1]&= ~WT_RDY_ENAB;
1193 	write_zsreg(c, R1, c->regs[R1]);
1194 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1195 	c->regs[R1]|= INT_ALL_Rx;
1196 	write_zsreg(c, R1, c->regs[R1]);
1197 	c->regs[R14]&= ~DTRREQ;
1198 	write_zsreg(c, R14, c->regs[R14]);
1199 
1200 	if(c->tx_dma_buf[0])
1201 	{
1202 		free_page((unsigned long)c->tx_dma_buf[0]);
1203 		c->tx_dma_buf[0]=NULL;
1204 	}
1205 	chk=read_zsreg(c,R0);
1206 	write_zsreg(c, R3, c->regs[R3]);
1207 	z8530_rtsdtr(c,0);
1208 	return 0;
1209 }
1210 
1211 
1212 EXPORT_SYMBOL(z8530_sync_txdma_close);
1213 
1214 
1215 /*
1216  *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1217  *	it exists...
1218  */
1219 
1220 static char *z8530_type_name[]={
1221 	"Z8530",
1222 	"Z85C30",
1223 	"Z85230"
1224 };
1225 
1226 /**
1227  *	z8530_describe - Uniformly describe a Z8530 port
1228  *	@dev: Z8530 device to describe
1229  *	@mapping: string holding mapping type (eg "I/O" or "Mem")
1230  *	@io: the port value in question
1231  *
1232  *	Describe a Z8530 in a standard format. We must pass the I/O as
1233  *	the port offset isnt predictable. The main reason for this function
1234  *	is to try and get a common format of report.
1235  */
1236 
z8530_describe(struct z8530_dev * dev,char * mapping,unsigned long io)1237 void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1238 {
1239 	printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
1240 		dev->name,
1241 		z8530_type_name[dev->type],
1242 		mapping,
1243 		Z8530_PORT_OF(io),
1244 		dev->irq);
1245 }
1246 
1247 EXPORT_SYMBOL(z8530_describe);
1248 
1249 /*
1250  *	Locked operation part of the z8530 init code
1251  */
1252 
do_z8530_init(struct z8530_dev * dev)1253 static int do_z8530_init(struct z8530_dev *dev)
1254 {
1255 	/* NOP the interrupt handlers first - we might get a
1256 	   floating IRQ transition when we reset the chip */
1257 	dev->chanA.irqs=&z8530_nop;
1258 	dev->chanB.irqs=&z8530_nop;
1259 	dev->chanA.dcdcheck=DCD;
1260 	dev->chanB.dcdcheck=DCD;
1261 
1262 	/* Set up the chip level lock */
1263 	spin_lock_init(&dev->lock);
1264 	dev->chanA.lock = &dev->lock;
1265 	dev->chanB.lock = &dev->lock;
1266 
1267 	/* Reset the chip */
1268 	write_zsreg(&dev->chanA, R9, 0xC0);
1269 	udelay(200);
1270 	/* Now check its valid */
1271 	write_zsreg(&dev->chanA, R12, 0xAA);
1272 	if(read_zsreg(&dev->chanA, R12)!=0xAA)
1273 		return -ENODEV;
1274 	write_zsreg(&dev->chanA, R12, 0x55);
1275 	if(read_zsreg(&dev->chanA, R12)!=0x55)
1276 		return -ENODEV;
1277 
1278 	dev->type=Z8530;
1279 
1280 	/*
1281 	 *	See the application note.
1282 	 */
1283 
1284 	write_zsreg(&dev->chanA, R15, 0x01);
1285 
1286 	/*
1287 	 *	If we can set the low bit of R15 then
1288 	 *	the chip is enhanced.
1289 	 */
1290 
1291 	if(read_zsreg(&dev->chanA, R15)==0x01)
1292 	{
1293 		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1294 		/* Put a char in the fifo */
1295 		write_zsreg(&dev->chanA, R8, 0);
1296 		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1297 			dev->type = Z85230;	/* Has a FIFO */
1298 		else
1299 			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
1300 	}
1301 
1302 	/*
1303 	 *	The code assumes R7' and friends are
1304 	 *	off. Use write_zsext() for these and keep
1305 	 *	this bit clear.
1306 	 */
1307 
1308 	write_zsreg(&dev->chanA, R15, 0);
1309 
1310 	/*
1311 	 *	At this point it looks like the chip is behaving
1312 	 */
1313 
1314 	memcpy(dev->chanA.regs, reg_init, 16);
1315 	memcpy(dev->chanB.regs, reg_init ,16);
1316 
1317 	return 0;
1318 }
1319 
1320 /**
1321  *	z8530_init - Initialise a Z8530 device
1322  *	@dev: Z8530 device to initialise.
1323  *
1324  *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1325  *	is present, identify the type and then program it to hopefully
1326  *	keep quite and behave. This matters a lot, a Z8530 in the wrong
1327  *	state will sometimes get into stupid modes generating 10Khz
1328  *	interrupt streams and the like.
1329  *
1330  *	We set the interrupt handler up to discard any events, in case
1331  *	we get them during reset or setp.
1332  *
1333  *	Return 0 for success, or a negative value indicating the problem
1334  *	in errno form.
1335  */
1336 
z8530_init(struct z8530_dev * dev)1337 int z8530_init(struct z8530_dev *dev)
1338 {
1339 	unsigned long flags;
1340 	int ret;
1341 
1342 	/* Set up the chip level lock */
1343 	spin_lock_init(&dev->lock);
1344 	dev->chanA.lock = &dev->lock;
1345 	dev->chanB.lock = &dev->lock;
1346 
1347 	spin_lock_irqsave(&dev->lock, flags);
1348 	ret = do_z8530_init(dev);
1349 	spin_unlock_irqrestore(&dev->lock, flags);
1350 
1351 	return ret;
1352 }
1353 
1354 
1355 EXPORT_SYMBOL(z8530_init);
1356 
1357 /**
1358  *	z8530_shutdown - Shutdown a Z8530 device
1359  *	@dev: The Z8530 chip to shutdown
1360  *
1361  *	We set the interrupt handlers to silence any interrupts. We then
1362  *	reset the chip and wait 100uS to be sure the reset completed. Just
1363  *	in case the caller then tries to do stuff.
1364  *
1365  *	This is called without the lock held
1366  */
1367 
z8530_shutdown(struct z8530_dev * dev)1368 int z8530_shutdown(struct z8530_dev *dev)
1369 {
1370 	unsigned long flags;
1371 	/* Reset the chip */
1372 
1373 	spin_lock_irqsave(&dev->lock, flags);
1374 	dev->chanA.irqs=&z8530_nop;
1375 	dev->chanB.irqs=&z8530_nop;
1376 	write_zsreg(&dev->chanA, R9, 0xC0);
1377 	/* We must lock the udelay, the chip is offlimits here */
1378 	udelay(100);
1379 	spin_unlock_irqrestore(&dev->lock, flags);
1380 	return 0;
1381 }
1382 
1383 EXPORT_SYMBOL(z8530_shutdown);
1384 
1385 /**
1386  *	z8530_channel_load - Load channel data
1387  *	@c: Z8530 channel to configure
1388  *	@rtable: table of register, value pairs
1389  *	FIXME: ioctl to allow user uploaded tables
1390  *
1391  *	Load a Z8530 channel up from the system data. We use +16 to
1392  *	indicate the "prime" registers. The value 255 terminates the
1393  *	table.
1394  */
1395 
z8530_channel_load(struct z8530_channel * c,u8 * rtable)1396 int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1397 {
1398 	unsigned long flags;
1399 
1400 	spin_lock_irqsave(c->lock, flags);
1401 
1402 	while(*rtable!=255)
1403 	{
1404 		int reg=*rtable++;
1405 		if(reg>0x0F)
1406 			write_zsreg(c, R15, c->regs[15]|1);
1407 		write_zsreg(c, reg&0x0F, *rtable);
1408 		if(reg>0x0F)
1409 			write_zsreg(c, R15, c->regs[15]&~1);
1410 		c->regs[reg]=*rtable++;
1411 	}
1412 	c->rx_function=z8530_null_rx;
1413 	c->skb=NULL;
1414 	c->tx_skb=NULL;
1415 	c->tx_next_skb=NULL;
1416 	c->mtu=1500;
1417 	c->max=0;
1418 	c->count=0;
1419 	c->status=read_zsreg(c, R0);
1420 	c->sync=1;
1421 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1422 
1423 	spin_unlock_irqrestore(c->lock, flags);
1424 	return 0;
1425 }
1426 
1427 EXPORT_SYMBOL(z8530_channel_load);
1428 
1429 
1430 /**
1431  *	z8530_tx_begin - Begin packet transmission
1432  *	@c: The Z8530 channel to kick
1433  *
1434  *	This is the speed sensitive side of transmission. If we are called
1435  *	and no buffer is being transmitted we commence the next buffer. If
1436  *	nothing is queued we idle the sync.
1437  *
1438  *	Note: We are handling this code path in the interrupt path, keep it
1439  *	fast or bad things will happen.
1440  *
1441  *	Called with the lock held.
1442  */
1443 
z8530_tx_begin(struct z8530_channel * c)1444 static void z8530_tx_begin(struct z8530_channel *c)
1445 {
1446 	unsigned long flags;
1447 	if(c->tx_skb)
1448 		return;
1449 
1450 	c->tx_skb=c->tx_next_skb;
1451 	c->tx_next_skb=NULL;
1452 	c->tx_ptr=c->tx_next_ptr;
1453 
1454 	if(c->tx_skb==NULL)
1455 	{
1456 		/* Idle on */
1457 		if(c->dma_tx)
1458 		{
1459 			flags=claim_dma_lock();
1460 			disable_dma(c->txdma);
1461 			/*
1462 			 *	Check if we crapped out.
1463 			 */
1464 			if(get_dma_residue(c->txdma))
1465 			{
1466 				c->stats.tx_dropped++;
1467 				c->stats.tx_fifo_errors++;
1468 			}
1469 			release_dma_lock(flags);
1470 		}
1471 		c->txcount=0;
1472 	}
1473 	else
1474 	{
1475 		c->txcount=c->tx_skb->len;
1476 
1477 
1478 		if(c->dma_tx)
1479 		{
1480 			/*
1481 			 *	FIXME. DMA is broken for the original 8530,
1482 			 *	on the older parts we need to set a flag and
1483 			 *	wait for a further TX interrupt to fire this
1484 			 *	stage off
1485 			 */
1486 
1487 			flags=claim_dma_lock();
1488 			disable_dma(c->txdma);
1489 
1490 			/*
1491 			 *	These two are needed by the 8530/85C30
1492 			 *	and must be issued when idling.
1493 			 */
1494 
1495 			if(c->dev->type!=Z85230)
1496 			{
1497 				write_zsctrl(c, RES_Tx_CRC);
1498 				write_zsctrl(c, RES_EOM_L);
1499 			}
1500 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1501 			clear_dma_ff(c->txdma);
1502 			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1503 			set_dma_count(c->txdma, c->txcount);
1504 			enable_dma(c->txdma);
1505 			release_dma_lock(flags);
1506 			write_zsctrl(c, RES_EOM_L);
1507 			write_zsreg(c, R5, c->regs[R5]|TxENAB);
1508 		}
1509 		else
1510 		{
1511 
1512 			/* ABUNDER off */
1513 			write_zsreg(c, R10, c->regs[10]);
1514 			write_zsctrl(c, RES_Tx_CRC);
1515 
1516 			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1517 			{
1518 				write_zsreg(c, R8, *c->tx_ptr++);
1519 				c->txcount--;
1520 			}
1521 
1522 		}
1523 	}
1524 	/*
1525 	 *	Since we emptied tx_skb we can ask for more
1526 	 */
1527 	netif_wake_queue(c->netdevice);
1528 }
1529 
1530 /**
1531  *	z8530_tx_done - TX complete callback
1532  *	@c: The channel that completed a transmit.
1533  *
1534  *	This is called when we complete a packet send. We wake the queue,
1535  *	start the next packet going and then free the buffer of the existing
1536  *	packet. This code is fairly timing sensitive.
1537  *
1538  *	Called with the register lock held.
1539  */
1540 
z8530_tx_done(struct z8530_channel * c)1541 static void z8530_tx_done(struct z8530_channel *c)
1542 {
1543 	struct sk_buff *skb;
1544 
1545 	/* Actually this can happen.*/
1546 	if(c->tx_skb==NULL)
1547 		return;
1548 
1549 	skb=c->tx_skb;
1550 	c->tx_skb=NULL;
1551 	z8530_tx_begin(c);
1552 	c->stats.tx_packets++;
1553 	c->stats.tx_bytes+=skb->len;
1554 	dev_kfree_skb_irq(skb);
1555 }
1556 
1557 /**
1558  *	z8530_null_rx - Discard a packet
1559  *	@c: The channel the packet arrived on
1560  *	@skb: The buffer
1561  *
1562  *	We point the receive handler at this function when idle. Instead
1563  *	of syncppp processing the frames we get to throw them away.
1564  */
1565 
z8530_null_rx(struct z8530_channel * c,struct sk_buff * skb)1566 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1567 {
1568 	dev_kfree_skb_any(skb);
1569 }
1570 
1571 EXPORT_SYMBOL(z8530_null_rx);
1572 
1573 /**
1574  *	z8530_rx_done - Receive completion callback
1575  *	@c: The channel that completed a receive
1576  *
1577  *	A new packet is complete. Our goal here is to get back into receive
1578  *	mode as fast as possible. On the Z85230 we could change to using
1579  *	ESCC mode, but on the older chips we have no choice. We flip to the
1580  *	new buffer immediately in DMA mode so that the DMA of the next
1581  *	frame can occur while we are copying the previous buffer to an sk_buff
1582  *
1583  *	Called with the lock held
1584  */
1585 
z8530_rx_done(struct z8530_channel * c)1586 static void z8530_rx_done(struct z8530_channel *c)
1587 {
1588 	struct sk_buff *skb;
1589 	int ct;
1590 
1591 	/*
1592 	 *	Is our receive engine in DMA mode
1593 	 */
1594 
1595 	if(c->rxdma_on)
1596 	{
1597 		/*
1598 		 *	Save the ready state and the buffer currently
1599 		 *	being used as the DMA target
1600 		 */
1601 
1602 		int ready=c->dma_ready;
1603 		unsigned char *rxb=c->rx_buf[c->dma_num];
1604 		unsigned long flags;
1605 
1606 		/*
1607 		 *	Complete this DMA. Neccessary to find the length
1608 		 */
1609 
1610 		flags=claim_dma_lock();
1611 
1612 		disable_dma(c->rxdma);
1613 		clear_dma_ff(c->rxdma);
1614 		c->rxdma_on=0;
1615 		ct=c->mtu-get_dma_residue(c->rxdma);
1616 		if(ct<0)
1617 			ct=2;	/* Shit happens.. */
1618 		c->dma_ready=0;
1619 
1620 		/*
1621 		 *	Normal case: the other slot is free, start the next DMA
1622 		 *	into it immediately.
1623 		 */
1624 
1625 		if(ready)
1626 		{
1627 			c->dma_num^=1;
1628 			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1629 			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1630 			set_dma_count(c->rxdma, c->mtu);
1631 			c->rxdma_on = 1;
1632 			enable_dma(c->rxdma);
1633 			/* Stop any frames that we missed the head of
1634 			   from passing */
1635 			write_zsreg(c, R0, RES_Rx_CRC);
1636 		}
1637 		else
1638 			/* Can't occur as we dont reenable the DMA irq until
1639 			   after the flip is done */
1640 			printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);
1641 
1642 		release_dma_lock(flags);
1643 
1644 		/*
1645 		 *	Shove the old buffer into an sk_buff. We can't DMA
1646 		 *	directly into one on a PC - it might be above the 16Mb
1647 		 *	boundary. Optimisation - we could check to see if we
1648 		 *	can avoid the copy. Optimisation 2 - make the memcpy
1649 		 *	a copychecksum.
1650 		 */
1651 
1652 		skb=dev_alloc_skb(ct);
1653 		if(skb==NULL)
1654 		{
1655 			c->stats.rx_dropped++;
1656 			printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
1657 		}
1658 		else
1659 		{
1660 			skb_put(skb, ct);
1661 			memcpy(skb->data, rxb, ct);
1662 			c->stats.rx_packets++;
1663 			c->stats.rx_bytes+=ct;
1664 		}
1665 		c->dma_ready=1;
1666 	}
1667 	else
1668 	{
1669 		RT_LOCK;
1670 		skb=c->skb;
1671 
1672 		/*
1673 		 *	The game we play for non DMA is similar. We want to
1674 		 *	get the controller set up for the next packet as fast
1675 		 *	as possible. We potentially only have one byte + the
1676 		 *	fifo length for this. Thus we want to flip to the new
1677 		 *	buffer and then mess around copying and allocating
1678 		 *	things. For the current case it doesn't matter but
1679 		 *	if you build a system where the sync irq isnt blocked
1680 		 *	by the kernel IRQ disable then you need only block the
1681 		 *	sync IRQ for the RT_LOCK area.
1682 		 *
1683 		 */
1684 		ct=c->count;
1685 
1686 		c->skb = c->skb2;
1687 		c->count = 0;
1688 		c->max = c->mtu;
1689 		if(c->skb)
1690 		{
1691 			c->dptr = c->skb->data;
1692 			c->max = c->mtu;
1693 		}
1694 		else
1695 		{
1696 			c->count= 0;
1697 			c->max = 0;
1698 		}
1699 		RT_UNLOCK;
1700 
1701 		c->skb2 = dev_alloc_skb(c->mtu);
1702 		if(c->skb2==NULL)
1703 			printk(KERN_WARNING "%s: memory squeeze.\n",
1704 				c->netdevice->name);
1705 		else
1706 		{
1707 			skb_put(c->skb2,c->mtu);
1708 		}
1709 		c->stats.rx_packets++;
1710 		c->stats.rx_bytes+=ct;
1711 
1712 	}
1713 	/*
1714 	 *	If we received a frame we must now process it.
1715 	 */
1716 	if(skb)
1717 	{
1718 		skb_trim(skb, ct);
1719 		c->rx_function(c,skb);
1720 	}
1721 	else
1722 	{
1723 		c->stats.rx_dropped++;
1724 		printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1725 	}
1726 }
1727 
1728 /**
1729  *	spans_boundary - Check a packet can be ISA DMA'd
1730  *	@skb: The buffer to check
1731  *
1732  *	Returns true if the buffer cross a DMA boundary on a PC. The poor
1733  *	thing can only DMA within a 64K block not across the edges of it.
1734  */
1735 
spans_boundary(struct sk_buff * skb)1736 static inline int spans_boundary(struct sk_buff *skb)
1737 {
1738 	unsigned long a=(unsigned long)skb->data;
1739 	a^=(a+skb->len);
1740 	if(a&0x00010000)	/* If the 64K bit is different.. */
1741 		return 1;
1742 	return 0;
1743 }
1744 
1745 /**
1746  *	z8530_queue_xmit - Queue a packet
1747  *	@c: The channel to use
1748  *	@skb: The packet to kick down the channel
1749  *
1750  *	Queue a packet for transmission. Because we have rather
1751  *	hard to hit interrupt latencies for the Z85230 per packet
1752  *	even in DMA mode we do the flip to DMA buffer if needed here
1753  *	not in the IRQ.
1754  *
1755  *	Called from the network code. The lock is not held at this
1756  *	point.
1757  */
1758 
z8530_queue_xmit(struct z8530_channel * c,struct sk_buff * skb)1759 int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1760 {
1761 	unsigned long flags;
1762 
1763 	netif_stop_queue(c->netdevice);
1764 	if(c->tx_next_skb)
1765 	{
1766 		return 1;
1767 	}
1768 
1769 	/* PC SPECIFIC - DMA limits */
1770 
1771 	/*
1772 	 *	If we will DMA the transmit and its gone over the ISA bus
1773 	 *	limit, then copy to the flip buffer
1774 	 */
1775 
1776 	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1777 	{
1778 		/*
1779 		 *	Send the flip buffer, and flip the flippy bit.
1780 		 *	We don't care which is used when just so long as
1781 		 *	we never use the same buffer twice in a row. Since
1782 		 *	only one buffer can be going out at a time the other
1783 		 *	has to be safe.
1784 		 */
1785 		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1786 		c->tx_dma_used^=1;	/* Flip temp buffer */
1787 		memcpy(c->tx_next_ptr, skb->data, skb->len);
1788 	}
1789 	else
1790 		c->tx_next_ptr=skb->data;
1791 	RT_LOCK;
1792 	c->tx_next_skb=skb;
1793 	RT_UNLOCK;
1794 
1795 	spin_lock_irqsave(c->lock, flags);
1796 	z8530_tx_begin(c);
1797 	spin_unlock_irqrestore(c->lock, flags);
1798 
1799 	return 0;
1800 }
1801 
1802 EXPORT_SYMBOL(z8530_queue_xmit);
1803 
1804 /**
1805  *	z8530_get_stats - Get network statistics
1806  *	@c: The channel to use
1807  *
1808  *	Get the statistics block. We keep the statistics in software as
1809  *	the chip doesn't do it for us.
1810  *
1811  *	Locking is ignored here - we could lock for a copy but its
1812  *	not likely to be that big an issue
1813  */
1814 
z8530_get_stats(struct z8530_channel * c)1815 struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1816 {
1817 	return &c->stats;
1818 }
1819 
1820 EXPORT_SYMBOL(z8530_get_stats);
1821 
1822 /*
1823  *	Module support
1824  */
1825 static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1826 
z85230_init_driver(void)1827 static int __init z85230_init_driver(void)
1828 {
1829 	printk(banner);
1830 	return 0;
1831 }
1832 module_init(z85230_init_driver);
1833 
z85230_cleanup_driver(void)1834 static void __exit z85230_cleanup_driver(void)
1835 {
1836 }
1837 module_exit(z85230_cleanup_driver);
1838 
1839 MODULE_AUTHOR("Red Hat Inc.");
1840 MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1841 MODULE_LICENSE("GPL");
1842