1 /*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 3.21 2003/09/05 14:04:24 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
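/*
 * Illustrative user-space sketch (not part of the driver, not compiled):
 * shows the one-frame-per-write() behavior described above when the port
 * is configured for synchronous HDLC mode.  The device node name
 * (/dev/ttySL0) is an assumption; the actual name depends on how the
 * nodes were created for the assigned major number.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int send_one_frame(const unsigned char *frame, int len)
{
	int fd = open("/dev/ttySL0", O_RDWR);	/* open a SyncLink port */
	if (fd < 0)
		return -1;
	/* in synchronous HDLC mode each write() carries one complete frame */
	if (write(fd, frame, len) != len) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif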
55
56 #define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
57 #if defined(__i386__)
58 # define BREAKPOINT() asm(" int $3");
59 #else
60 # define BREAKPOINT() { }
61 #endif
62
63 #define MAX_ISA_DEVICES 10
64 #define MAX_PCI_DEVICES 10
65 #define MAX_TOTAL_DEVICES 20
66
67 #include <linux/config.h>
68 #include <linux/module.h>
69 #include <linux/version.h>
70 #include <linux/errno.h>
71 #include <linux/signal.h>
72 #include <linux/sched.h>
73 #include <linux/timer.h>
74 #include <linux/interrupt.h>
75 #include <linux/pci.h>
76 #include <linux/tty.h>
77 #include <linux/tty_flip.h>
78 #include <linux/serial.h>
79 #include <linux/major.h>
80 #include <linux/string.h>
81 #include <linux/fcntl.h>
82 #include <linux/ptrace.h>
83 #include <linux/ioport.h>
84 #include <linux/mm.h>
85 #include <linux/slab.h>
86
87 #include <linux/netdevice.h>
88
89 #include <linux/vmalloc.h>
90 #include <linux/init.h>
91 #include <asm/serial.h>
92
93 #include <linux/delay.h>
94 #include <linux/ioctl.h>
95
96 #include <asm/system.h>
97 #include <asm/io.h>
98 #include <asm/irq.h>
99 #include <asm/dma.h>
100 #include <asm/bitops.h>
101 #include <asm/types.h>
102 #include <linux/termios.h>
103 #include <linux/tqueue.h>
104
105 #ifdef CONFIG_SYNCLINK_SYNCPPP_MODULE
106 #define CONFIG_SYNCLINK_SYNCPPP 1
107 #endif
108
109 #ifdef CONFIG_SYNCLINK_SYNCPPP
110 #if LINUX_VERSION_CODE < VERSION(2,4,3)
111 #include "../net/wan/syncppp.h"
112 #else
113 #include <net/syncppp.h>
114 #endif
115 #endif
116
117 #include <asm/segment.h>
118 #define GET_USER(error,value,addr) error = get_user(value,addr)
119 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
120 #define PUT_USER(error,value,addr) error = put_user(value,addr)
121 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
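/*
 * Illustrative sketch (not compiled): typical use of the wrappers above.
 * They assign an error code to a local variable instead of returning one,
 * so callers test the variable afterwards.  The function name and
 * parameters here are placeholders for this example only.
 */
#if 0
static int example_copy_from_user(void *dest, const void *user_src, size_t size)
{
	int err;

	COPY_FROM_USER(err, dest, user_src, size);
	return err;	/* -EFAULT if the user copy faulted, otherwise 0 */
}
#endif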
122
123 #include <asm/uaccess.h>
124
125 #include "linux/synclink.h"
126
127 #define RCLRVALUE 0xffff
128
129 MGSL_PARAMS default_params = {
130 MGSL_MODE_HDLC, /* unsigned long mode */
131 0, /* unsigned char loopback; */
132 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
133 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
134 0, /* unsigned long clock_speed; */
135 0xff, /* unsigned char addr_filter; */
136 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
137 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
138 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
139 9600, /* unsigned long data_rate; */
140 8, /* unsigned char data_bits; */
141 1, /* unsigned char stop_bits; */
142 ASYNC_PARITY_NONE /* unsigned char parity; */
143 };
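/*
 * Illustrative sketch (not compiled): how a freshly allocated device
 * instance might pick up these defaults before the user replaces them
 * through an ioctl such as MGSL_IOCSPARAMS.  "info" stands for a
 * struct mgsl_struct instance, which is defined later in this file.
 */
#if 0
static void example_apply_defaults(struct mgsl_struct *info)
{
	memcpy(&info->params, &default_params, sizeof(MGSL_PARAMS));
	info->params.data_rate = 38400;	/* example override of one field */
}
#endif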
144
145 #define SHARED_MEM_ADDRESS_SIZE 0x40000
146 #define BUFFERLISTSIZE (PAGE_SIZE)
147 #define DMABUFFERSIZE (PAGE_SIZE)
148 #define MAXRXFRAMES 7
149
150 typedef struct _DMABUFFERENTRY
151 {
152 u32 phys_addr; /* 32-bit flat physical address of data buffer */
153 u16 count; /* buffer size/data count */
154 u16 status; /* Control/status field */
155 u16 rcc; /* character count field */
156 u16 reserved; /* padding required by 16C32 */
157 u32 link; /* 32-bit flat link to next buffer entry */
158 char *virt_addr; /* virtual address of data buffer */
159 u32 phys_entry; /* physical address of this buffer entry */
160 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
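/*
 * Illustrative sketch (not compiled): how a set of DMABUFFERENTRY
 * descriptors is typically chained into a circular list for the 16C32,
 * using the phys_entry of each entry as the link value of the previous
 * one.  The driver's real setup lives in its buffer list allocation
 * code; this only clarifies the role of the link/phys_entry fields.
 */
#if 0
static void example_link_buffer_list(DMABUFFERENTRY *list, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* point each entry at the physical address of the next one,
		 * wrapping the last entry back to the first
		 */
		list[i].link = list[(i + 1) % count].phys_entry;
		list[i].count = DMABUFFERSIZE;
	}
}
#endif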
161
162 /* The queue of BH actions to be performed */
163
164 #define BH_RECEIVE 1
165 #define BH_TRANSMIT 2
166 #define BH_STATUS 4
167
168 #define IO_PIN_SHUTDOWN_LIMIT 100
169
170 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
171
172 struct _input_signal_events {
173 int ri_up;
174 int ri_down;
175 int dsr_up;
176 int dsr_down;
177 int dcd_up;
178 int dcd_down;
179 int cts_up;
180 int cts_down;
181 };
182
183 /* transmit holding buffer definitions*/
184 #define MAX_TX_HOLDING_BUFFERS 5
185 struct tx_holding_buffer {
186 int buffer_size;
187 unsigned char * buffer;
188 };
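/*
 * Illustrative sketch (not compiled): the holding buffers are used as a
 * simple ring indexed by the get/put indices kept in the device instance
 * data below.  The real queueing logic is in save_tx_buffer_request() and
 * load_next_tx_holding_buffer(); this fragment only shows the intended
 * index arithmetic.
 */
#if 0
static void example_advance_put_index(struct mgsl_struct *info)
{
	/* advance the put index after queueing a frame, wrapping at the
	 * number of holding buffers actually allocated for this device
	 */
	if (++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index = 0;
	info->tx_holding_count++;
}
#endif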
189
190
191 /*
192 * Device instance data structure
193 */
194
195 struct mgsl_struct {
196 void *if_ptr; /* General purpose pointer (used by SPPP) */
197 int magic;
198 int flags;
199 int count; /* count of opens */
200 int line;
201 int hw_version;
202 unsigned short close_delay;
203 unsigned short closing_wait; /* time to wait before closing */
204
205 struct mgsl_icount icount;
206
207 struct termios normal_termios;
208 struct termios callout_termios;
209
210 struct tty_struct *tty;
211 int timeout;
212 int x_char; /* xon/xoff character */
213 int blocked_open; /* # of blocked opens */
214 long session; /* Session of opening process */
215 long pgrp; /* pgrp of opening process */
216 u16 read_status_mask;
217 u16 ignore_status_mask;
218 unsigned char *xmit_buf;
219 int xmit_head;
220 int xmit_tail;
221 int xmit_cnt;
222
223 wait_queue_head_t open_wait;
224 wait_queue_head_t close_wait;
225
226 wait_queue_head_t status_event_wait_q;
227 wait_queue_head_t event_wait_q;
228 struct timer_list tx_timer; /* HDLC transmit timeout timer */
229 struct mgsl_struct *next_device; /* device list link */
230
231 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
232 struct tq_struct task; /* task structure for scheduling bh */
233
234 u32 EventMask; /* event trigger mask */
235 u32 RecordedEvents; /* pending events */
236
237 u32 max_frame_size; /* as set by device config */
238
239 u32 pending_bh;
240
241 int bh_running; /* Protection from multiple BH runs */
242 int isr_overflow;
243 int bh_requested;
244
245 int dcd_chkcount; /* check counts to prevent */
246 int cts_chkcount; /* too many IRQs if a signal */
247 int dsr_chkcount; /* is floating */
248 int ri_chkcount;
249
250 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
251 unsigned long buffer_list_phys;
252
253 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
254 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
255 unsigned int current_rx_buffer;
256
257 int num_tx_dma_buffers; /* number of tx dma frames required */
258 int tx_dma_buffers_used;
259 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
260 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
261 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
262 int current_tx_buffer; /* next tx dma buffer to be loaded */
263
264 unsigned char *intermediate_rxbuffer;
265
266 int num_tx_holding_buffers; /* number of tx holding buffers allocated */
267 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
268 int put_tx_holding_index; /* next tx holding buffer to store user request */
269 int tx_holding_count; /* number of tx holding buffers waiting */
270 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
271
272 int rx_enabled;
273 int rx_overflow;
274 int rx_rcc_underrun;
275
276 int tx_enabled;
277 int tx_active;
278 u32 idle_mode;
279
280 u16 cmr_value;
281 u16 tcsr_value;
282
283 char device_name[25]; /* device instance name */
284
285 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
286 unsigned char bus; /* expansion bus number (zero based) */
287 unsigned char function; /* PCI device number */
288
289 unsigned int io_base; /* base I/O address of adapter */
290 unsigned int io_addr_size; /* size of the I/O address range */
291 int io_addr_requested; /* nonzero if I/O address requested */
292
293 unsigned int irq_level; /* interrupt level */
294 unsigned long irq_flags;
295 int irq_requested; /* nonzero if IRQ requested */
296
297 unsigned int dma_level; /* DMA channel */
298 int dma_requested; /* nonzero if dma channel requested */
299
300 u16 mbre_bit;
301 u16 loopback_bits;
302 u16 usc_idle_mode;
303
304 MGSL_PARAMS params; /* communications parameters */
305
306 unsigned char serial_signals; /* current serial signal states */
307
308 int irq_occurred; /* for diagnostics use */
309 unsigned int init_error; /* Initialization startup error (DIAGS) */
310 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
311
312 u32 last_mem_alloc;
313 unsigned char* memory_base; /* shared memory address (PCI only) */
314 u32 phys_memory_base;
315 int shared_mem_requested;
316
317 unsigned char* lcr_base; /* local config registers (PCI only) */
318 u32 phys_lcr_base;
319 u32 lcr_offset;
320 int lcr_mem_requested;
321
322 u32 misc_ctrl_value;
323 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
324 char char_buf[MAX_ASYNC_BUFFER_SIZE];
325 BOOLEAN drop_rts_on_tx_done;
326
327 BOOLEAN loopmode_insert_requested;
328 BOOLEAN loopmode_send_done_requested;
329
330 struct _input_signal_events input_signal_events;
331
332 /* SPPP/Cisco HDLC device parts */
333 int netcount;
334 int dosyncppp;
335 spinlock_t netlock;
336 #ifdef CONFIG_SYNCLINK_SYNCPPP
337 struct ppp_device pppdev;
338 char netname[10];
339 struct net_device *netdev;
340 struct net_device_stats netstats;
341 struct net_device netdevice;
342 #endif
343 };
344
345 #define MGSL_MAGIC 0x5401
346
347 /*
348 * The size of the serial xmit buffer is 1 page, or 4096 bytes
349 */
350 #ifndef SERIAL_XMIT_SIZE
351 #define SERIAL_XMIT_SIZE 4096
352 #endif
353
354 /*
355 * These macros define the offsets used in calculating the
356 * I/O address of the specified USC registers.
357 */
358
359
360 #define DCPIN 2 /* Bit 1 of I/O address */
361 #define SDPIN 4 /* Bit 2 of I/O address */
362
363 #define DCAR 0 /* DMA command/address register */
364 #define CCAR SDPIN /* channel command/address register */
365 #define DATAREG (DCPIN + SDPIN) /* serial data register */
366 #define MSBONLY 0x41
367 #define LSBONLY 0x40
368
369 /*
370 * These macros define the register address (ordinal number)
371 * used for writing address/value pairs to the USC.
372 */
373
374 #define CMR 0x02 /* Channel mode Register */
375 #define CCSR 0x04 /* Channel Command/status Register */
376 #define CCR 0x06 /* Channel Control Register */
377 #define PSR 0x08 /* Port status Register */
378 #define PCR 0x0a /* Port Control Register */
379 #define TMDR 0x0c /* Test mode Data Register */
380 #define TMCR 0x0e /* Test mode Control Register */
381 #define CMCR 0x10 /* Clock mode Control Register */
382 #define HCR 0x12 /* Hardware Configuration Register */
383 #define IVR 0x14 /* Interrupt Vector Register */
384 #define IOCR 0x16 /* Input/Output Control Register */
385 #define ICR 0x18 /* Interrupt Control Register */
386 #define DCCR 0x1a /* Daisy Chain Control Register */
387 #define MISR 0x1c /* Misc Interrupt status Register */
388 #define SICR 0x1e /* status Interrupt Control Register */
389 #define RDR 0x20 /* Receive Data Register */
390 #define RMR 0x22 /* Receive mode Register */
391 #define RCSR 0x24 /* Receive Command/status Register */
392 #define RICR 0x26 /* Receive Interrupt Control Register */
393 #define RSR 0x28 /* Receive Sync Register */
394 #define RCLR 0x2a /* Receive count Limit Register */
395 #define RCCR 0x2c /* Receive Character count Register */
396 #define TC0R 0x2e /* Time Constant 0 Register */
397 #define TDR 0x30 /* Transmit Data Register */
398 #define TMR 0x32 /* Transmit mode Register */
399 #define TCSR 0x34 /* Transmit Command/status Register */
400 #define TICR 0x36 /* Transmit Interrupt Control Register */
401 #define TSR 0x38 /* Transmit Sync Register */
402 #define TCLR 0x3a /* Transmit count Limit Register */
403 #define TCCR 0x3c /* Transmit Character count Register */
404 #define TC1R 0x3e /* Time Constant 1 Register */
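/*
 * Illustrative sketch (not compiled): direct access to a serial register
 * through the Channel Command/Address Register, mirroring the pattern used
 * by mgsl_isr_receive_data() later in this file.  The register ordinal
 * (plus LSBONLY/MSBONLY to select a byte) is written to CCAR, then the
 * data is transferred through the same I/O location.
 */
#if 0
static unsigned char example_read_rdr_lsb(struct mgsl_struct *info)
{
	/* select the low byte of the Receive Data Register (RDR) while
	 * preserving the mode bits currently latched in CCAR
	 */
	outw((inw(info->io_base + CCAR) & 0x0780) | (RDR + LSBONLY),
	     info->io_base + CCAR);

	/* read the selected byte */
	return inb(info->io_base + CCAR);
}
#endif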
405
406
407 /*
408 * MACRO DEFINITIONS FOR DMA REGISTERS
409 */
410
411 #define DCR 0x06 /* DMA Control Register (shared) */
412 #define DACR 0x08 /* DMA Array count Register (shared) */
413 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
414 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
415 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
416 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
417 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
418
419 #define TDMR 0x02 /* Transmit DMA mode Register */
420 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
421 #define TBCR 0x2a /* Transmit Byte count Register */
422 #define TARL 0x2c /* Transmit Address Register (low) */
423 #define TARU 0x2e /* Transmit Address Register (high) */
424 #define NTBCR 0x3a /* Next Transmit Byte count Register */
425 #define NTARL 0x3c /* Next Transmit Address Register (low) */
426 #define NTARU 0x3e /* Next Transmit Address Register (high) */
427
428 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
429 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
430 #define RBCR 0xaa /* Receive Byte count Register */
431 #define RARL 0xac /* Receive Address Register (low) */
432 #define RARU 0xae /* Receive Address Register (high) */
433 #define NRBCR 0xba /* Next Receive Byte count Register */
434 #define NRARL 0xbc /* Next Receive Address Register (low) */
435 #define NRARU 0xbe /* Next Receive Address Register (high) */
436
437
438 /*
439 * MACRO DEFINITIONS FOR MODEM STATUS BITS
440 */
441
442 #define MODEMSTATUS_DTR 0x80
443 #define MODEMSTATUS_DSR 0x40
444 #define MODEMSTATUS_RTS 0x20
445 #define MODEMSTATUS_CTS 0x10
446 #define MODEMSTATUS_RI 0x04
447 #define MODEMSTATUS_DCD 0x01
448
449
450 /*
451 * Channel Command/Address Register (CCAR) Command Codes
452 */
453
454 #define RTCmd_Null 0x0000
455 #define RTCmd_ResetHighestIus 0x1000
456 #define RTCmd_TriggerChannelLoadDma 0x2000
457 #define RTCmd_TriggerRxDma 0x2800
458 #define RTCmd_TriggerTxDma 0x3000
459 #define RTCmd_TriggerRxAndTxDma 0x3800
460 #define RTCmd_PurgeRxFifo 0x4800
461 #define RTCmd_PurgeTxFifo 0x5000
462 #define RTCmd_PurgeRxAndTxFifo 0x5800
463 #define RTCmd_LoadRcc 0x6800
464 #define RTCmd_LoadTcc 0x7000
465 #define RTCmd_LoadRccAndTcc 0x7800
466 #define RTCmd_LoadTC0 0x8800
467 #define RTCmd_LoadTC1 0x9000
468 #define RTCmd_LoadTC0AndTC1 0x9800
469 #define RTCmd_SerialDataLSBFirst 0xa000
470 #define RTCmd_SerialDataMSBFirst 0xa800
471 #define RTCmd_SelectBigEndian 0xb000
472 #define RTCmd_SelectLittleEndian 0xb800
473
474
475 /*
476 * DMA Command/Address Register (DCAR) Command Codes
477 */
478
479 #define DmaCmd_Null 0x0000
480 #define DmaCmd_ResetTxChannel 0x1000
481 #define DmaCmd_ResetRxChannel 0x1200
482 #define DmaCmd_StartTxChannel 0x2000
483 #define DmaCmd_StartRxChannel 0x2200
484 #define DmaCmd_ContinueTxChannel 0x3000
485 #define DmaCmd_ContinueRxChannel 0x3200
486 #define DmaCmd_PauseTxChannel 0x4000
487 #define DmaCmd_PauseRxChannel 0x4200
488 #define DmaCmd_AbortTxChannel 0x5000
489 #define DmaCmd_AbortRxChannel 0x5200
490 #define DmaCmd_InitTxChannel 0x7000
491 #define DmaCmd_InitRxChannel 0x7200
492 #define DmaCmd_ResetHighestDmaIus 0x8000
493 #define DmaCmd_ResetAllChannels 0x9000
494 #define DmaCmd_StartAllChannels 0xa000
495 #define DmaCmd_ContinueAllChannels 0xb000
496 #define DmaCmd_PauseAllChannels 0xc000
497 #define DmaCmd_AbortAllChannels 0xd000
498 #define DmaCmd_InitAllChannels 0xf000
499
500 #define TCmd_Null 0x0000
501 #define TCmd_ClearTxCRC 0x2000
502 #define TCmd_SelectTicrTtsaData 0x4000
503 #define TCmd_SelectTicrTxFifostatus 0x5000
504 #define TCmd_SelectTicrIntLevel 0x6000
505 #define TCmd_SelectTicrdma_level 0x7000
506 #define TCmd_SendFrame 0x8000
507 #define TCmd_SendAbort 0x9000
508 #define TCmd_EnableDleInsertion 0xc000
509 #define TCmd_DisableDleInsertion 0xd000
510 #define TCmd_ClearEofEom 0xe000
511 #define TCmd_SetEofEom 0xf000
512
513 #define RCmd_Null 0x0000
514 #define RCmd_ClearRxCRC 0x2000
515 #define RCmd_EnterHuntmode 0x3000
516 #define RCmd_SelectRicrRtsaData 0x4000
517 #define RCmd_SelectRicrRxFifostatus 0x5000
518 #define RCmd_SelectRicrIntLevel 0x6000
519 #define RCmd_SelectRicrdma_level 0x7000
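/*
 * Illustrative sketch (not compiled): the command codes above are issued
 * through the helpers declared later in this file (usc_RTCmd(), usc_DmaCmd(),
 * usc_TCmd(), usc_RCmd()).  These particular calls mirror sequences that
 * appear in the interrupt handlers below.
 */
#if 0
static void example_issue_commands(struct mgsl_struct *info)
{
	usc_RTCmd(info, RTCmd_PurgeTxFifo);		/* discard stale transmit data */
	usc_DmaCmd(info, DmaCmd_ResetTxChannel);	/* stop the transmit DMA channel */
	usc_RCmd(info, RCmd_SelectRicrRxFifostatus);	/* select RICR FIFO status readback */
}
#endif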
520
521 /*
522 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
523 */
524
525 #define RECEIVE_STATUS BIT5
526 #define RECEIVE_DATA BIT4
527 #define TRANSMIT_STATUS BIT3
528 #define TRANSMIT_DATA BIT2
529 #define IO_PIN BIT1
530 #define MISC BIT0
531
532
533 /*
534 * Receive status Bits in Receive Command/status Register RCSR
535 */
536
537 #define RXSTATUS_SHORT_FRAME BIT8
538 #define RXSTATUS_CODE_VIOLATION BIT8
539 #define RXSTATUS_EXITED_HUNT BIT7
540 #define RXSTATUS_IDLE_RECEIVED BIT6
541 #define RXSTATUS_BREAK_RECEIVED BIT5
542 #define RXSTATUS_ABORT_RECEIVED BIT5
543 #define RXSTATUS_RXBOUND BIT4
544 #define RXSTATUS_CRC_ERROR BIT3
545 #define RXSTATUS_FRAMING_ERROR BIT3
546 #define RXSTATUS_ABORT BIT2
547 #define RXSTATUS_PARITY_ERROR BIT2
548 #define RXSTATUS_OVERRUN BIT1
549 #define RXSTATUS_DATA_AVAILABLE BIT0
550 #define RXSTATUS_ALL 0x01f6
551 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
552
553 /*
554 * Values for setting transmit idle mode in
555 * Transmit Control/status Register (TCSR)
556 */
557 #define IDLEMODE_FLAGS 0x0000
558 #define IDLEMODE_ALT_ONE_ZERO 0x0100
559 #define IDLEMODE_ZERO 0x0200
560 #define IDLEMODE_ONE 0x0300
561 #define IDLEMODE_ALT_MARK_SPACE 0x0500
562 #define IDLEMODE_SPACE 0x0600
563 #define IDLEMODE_MARK 0x0700
564 #define IDLEMODE_MASK 0x0700
565
566 /*
567 * IUSC revision identifiers
568 */
569 #define IUSC_SL1660 0x4d44
570 #define IUSC_PRE_SL1660 0x4553
571
572 /*
573 * Transmit status Bits in Transmit Command/status Register (TCSR)
574 */
575
576 #define TCSR_PRESERVE 0x0F00
577
578 #define TCSR_UNDERWAIT BIT11
579 #define TXSTATUS_PREAMBLE_SENT BIT7
580 #define TXSTATUS_IDLE_SENT BIT6
581 #define TXSTATUS_ABORT_SENT BIT5
582 #define TXSTATUS_EOF_SENT BIT4
583 #define TXSTATUS_EOM_SENT BIT4
584 #define TXSTATUS_CRC_SENT BIT3
585 #define TXSTATUS_ALL_SENT BIT2
586 #define TXSTATUS_UNDERRUN BIT1
587 #define TXSTATUS_FIFO_EMPTY BIT0
588 #define TXSTATUS_ALL 0x00fa
589 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
590
591
592 #define MISCSTATUS_RXC_LATCHED BIT15
593 #define MISCSTATUS_RXC BIT14
594 #define MISCSTATUS_TXC_LATCHED BIT13
595 #define MISCSTATUS_TXC BIT12
596 #define MISCSTATUS_RI_LATCHED BIT11
597 #define MISCSTATUS_RI BIT10
598 #define MISCSTATUS_DSR_LATCHED BIT9
599 #define MISCSTATUS_DSR BIT8
600 #define MISCSTATUS_DCD_LATCHED BIT7
601 #define MISCSTATUS_DCD BIT6
602 #define MISCSTATUS_CTS_LATCHED BIT5
603 #define MISCSTATUS_CTS BIT4
604 #define MISCSTATUS_RCC_UNDERRUN BIT3
605 #define MISCSTATUS_DPLL_NO_SYNC BIT2
606 #define MISCSTATUS_BRG1_ZERO BIT1
607 #define MISCSTATUS_BRG0_ZERO BIT0
608
609 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
610 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
611
612 #define SICR_RXC_ACTIVE BIT15
613 #define SICR_RXC_INACTIVE BIT14
614 #define SICR_RXC (BIT15+BIT14)
615 #define SICR_TXC_ACTIVE BIT13
616 #define SICR_TXC_INACTIVE BIT12
617 #define SICR_TXC (BIT13+BIT12)
618 #define SICR_RI_ACTIVE BIT11
619 #define SICR_RI_INACTIVE BIT10
620 #define SICR_RI (BIT11+BIT10)
621 #define SICR_DSR_ACTIVE BIT9
622 #define SICR_DSR_INACTIVE BIT8
623 #define SICR_DSR (BIT9+BIT8)
624 #define SICR_DCD_ACTIVE BIT7
625 #define SICR_DCD_INACTIVE BIT6
626 #define SICR_DCD (BIT7+BIT6)
627 #define SICR_CTS_ACTIVE BIT5
628 #define SICR_CTS_INACTIVE BIT4
629 #define SICR_CTS (BIT5+BIT4)
630 #define SICR_RCC_UNDERFLOW BIT3
631 #define SICR_DPLL_NO_SYNC BIT2
632 #define SICR_BRG1_ZERO BIT1
633 #define SICR_BRG0_ZERO BIT0
634
635 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
636 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
637 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
638 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
639 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
640
641 #define usc_EnableInterrupts( a, b ) \
642 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
643
644 #define usc_DisableInterrupts( a, b ) \
645 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
646
647 #define usc_EnableMasterIrqBit(a) \
648 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
649
650 #define usc_DisableMasterIrqBit(a) \
651 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
652
653 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
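/*
 * Illustrative sketch (not compiled): enabling and later disabling a set
 * of interrupt sources with the macros above.  The clear/disable pairing
 * mirrors the receive shutdown sequence in mgsl_isr_misc() below.
 */
#if 0
static void example_toggle_receive_irqs(struct mgsl_struct *info)
{
	usc_EnableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);

	/* ...later, when shutting the receiver down... */
	usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
	usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
}
#endif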
654
655 /*
656 * Transmit status Bits in Transmit Control status Register (TCSR)
657 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
658 */
659
660 #define TXSTATUS_PREAMBLE_SENT BIT7
661 #define TXSTATUS_IDLE_SENT BIT6
662 #define TXSTATUS_ABORT_SENT BIT5
663 #define TXSTATUS_EOF BIT4
664 #define TXSTATUS_CRC_SENT BIT3
665 #define TXSTATUS_ALL_SENT BIT2
666 #define TXSTATUS_UNDERRUN BIT1
667 #define TXSTATUS_FIFO_EMPTY BIT0
668
669 #define DICR_MASTER BIT15
670 #define DICR_TRANSMIT BIT0
671 #define DICR_RECEIVE BIT1
672
673 #define usc_EnableDmaInterrupts(a,b) \
674 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
675
676 #define usc_DisableDmaInterrupts(a,b) \
677 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
678
679 #define usc_EnableStatusIrqs(a,b) \
680 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
681
682 #define usc_DisablestatusIrqs(a,b) \
683 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
684
685 /* Transmit status Bits in Transmit Control status Register (TCSR) */
686 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
687
688
689 #define DISABLE_UNCONDITIONAL 0
690 #define DISABLE_END_OF_FRAME 1
691 #define ENABLE_UNCONDITIONAL 2
692 #define ENABLE_AUTO_CTS 3
693 #define ENABLE_AUTO_DCD 3
694 #define usc_EnableTransmitter(a,b) \
695 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
696 #define usc_EnableReceiver(a,b) \
697 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
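/*
 * Illustrative sketch (not compiled): the enable/disable codes above are
 * written to the low two bits of TMR/RMR.  A call like the first line
 * appears in mgsl_isr_misc() below; the second shows the unconditional
 * enable case.
 */
#if 0
static void example_toggle_receiver(struct mgsl_struct *info)
{
	usc_EnableReceiver(info, DISABLE_UNCONDITIONAL);	/* turn receiver off */
	usc_EnableReceiver(info, ENABLE_UNCONDITIONAL);		/* turn receiver on */
}
#endif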
698
699 u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
700 void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
701 void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
702
703 u16 usc_InReg( struct mgsl_struct *info, u16 Port );
704 void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
705 void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
706 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
707 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
708
709 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
710 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
711
712 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
713
714 void usc_process_rxoverrun_sync( struct mgsl_struct *info );
715 void usc_start_receiver( struct mgsl_struct *info );
716 void usc_stop_receiver( struct mgsl_struct *info );
717
718 void usc_start_transmitter( struct mgsl_struct *info );
719 void usc_stop_transmitter( struct mgsl_struct *info );
720 void usc_set_txidle( struct mgsl_struct *info );
721 void usc_load_txfifo( struct mgsl_struct *info );
722
723 void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
724 void usc_enable_loopback( struct mgsl_struct *info, int enable );
725
726 void usc_get_serial_signals( struct mgsl_struct *info );
727 void usc_set_serial_signals( struct mgsl_struct *info );
728
729 void usc_reset( struct mgsl_struct *info );
730
731 void usc_set_sync_mode( struct mgsl_struct *info );
732 void usc_set_sdlc_mode( struct mgsl_struct *info );
733 void usc_set_async_mode( struct mgsl_struct *info );
734 void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
735
736 void usc_loopback_frame( struct mgsl_struct *info );
737
738 void mgsl_tx_timeout(unsigned long context);
739
740
741 void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
742 void usc_loopmode_insert_request( struct mgsl_struct * info );
743 int usc_loopmode_active( struct mgsl_struct * info);
744 void usc_loopmode_send_done( struct mgsl_struct * info );
745 int usc_loopmode_send_active( struct mgsl_struct * info );
746
747 int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
748
749 #ifdef CONFIG_SYNCLINK_SYNCPPP
750 /* SPPP/HDLC stuff */
751 void mgsl_sppp_init(struct mgsl_struct *info);
752 void mgsl_sppp_delete(struct mgsl_struct *info);
753 int mgsl_sppp_open(struct net_device *d);
754 int mgsl_sppp_close(struct net_device *d);
755 void mgsl_sppp_tx_timeout(struct net_device *d);
756 int mgsl_sppp_tx(struct sk_buff *skb, struct net_device *d);
757 void mgsl_sppp_rx_done(struct mgsl_struct *info, char *buf, int size);
758 void mgsl_sppp_tx_done(struct mgsl_struct *info);
759 int mgsl_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
760 struct net_device_stats *mgsl_net_stats(struct net_device *dev);
761 #endif
762
763 /*
764 * Defines a BUS descriptor value for the PCI adapter
765 * local bus address ranges.
766 */
767
768 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
769 (0x00400020 + \
770 ((WrHold) << 30) + \
771 ((WrDly) << 28) + \
772 ((RdDly) << 26) + \
773 ((Nwdd) << 20) + \
774 ((Nwad) << 15) + \
775 ((Nxda) << 13) + \
776 ((Nrdd) << 11) + \
777 ((Nrad) << 6) )
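/*
 * Illustrative arithmetic (not compiled): BUS_DESCRIPTOR() just ORs its
 * timing fields into fixed bit positions on top of the base value
 * 0x00400020.  The argument values below are arbitrary and chosen only to
 * show the packing; they are not the settings the driver actually uses.
 */
#if 0
/* WrHold=1, all other fields 0:
 *   0x00400020 + (1 << 30) = 0x40400020
 */
static const u32 example_bus_descriptor = BUS_DESCRIPTOR(1, 0, 0, 0, 0, 0, 0, 0);
#endif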
778
779 void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
780
781 /*
782 * Adapter diagnostic routines
783 */
784 BOOLEAN mgsl_register_test( struct mgsl_struct *info );
785 BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
786 BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
787 BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
788 int mgsl_adapter_test( struct mgsl_struct *info );
789
790 /*
791 * device and resource management routines
792 */
793 int mgsl_claim_resources(struct mgsl_struct *info);
794 void mgsl_release_resources(struct mgsl_struct *info);
795 void mgsl_add_device(struct mgsl_struct *info);
796 struct mgsl_struct* mgsl_allocate_device(void);
797 int mgsl_enum_isa_devices(void);
798
799 /*
800 * DMA buffer manipulation functions.
801 */
802 void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
803 int mgsl_get_rx_frame( struct mgsl_struct *info );
804 int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
805 void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
806 void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
807 int num_free_tx_dma_buffers(struct mgsl_struct *info);
808 void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
809 void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
810
811 /*
812 * DMA and Shared Memory buffer allocation and formatting
813 */
814 int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
815 void mgsl_free_dma_buffers(struct mgsl_struct *info);
816 int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
817 void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
818 int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
819 void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
820 int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
821 void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
822 int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
823 void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
824 int load_next_tx_holding_buffer(struct mgsl_struct *info);
825 int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
826
827 /*
828 * Bottom half interrupt handlers
829 */
830 void mgsl_bh_handler(void* Context);
831 void mgsl_bh_receive(struct mgsl_struct *info);
832 void mgsl_bh_transmit(struct mgsl_struct *info);
833 void mgsl_bh_status(struct mgsl_struct *info);
834
835 /*
836 * Interrupt handler routines and dispatch table.
837 */
838 void mgsl_isr_null( struct mgsl_struct *info );
839 void mgsl_isr_transmit_data( struct mgsl_struct *info );
840 void mgsl_isr_receive_data( struct mgsl_struct *info );
841 void mgsl_isr_receive_status( struct mgsl_struct *info );
842 void mgsl_isr_transmit_status( struct mgsl_struct *info );
843 void mgsl_isr_io_pin( struct mgsl_struct *info );
844 void mgsl_isr_misc( struct mgsl_struct *info );
845 void mgsl_isr_receive_dma( struct mgsl_struct *info );
846 void mgsl_isr_transmit_dma( struct mgsl_struct *info );
847
848 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
849
850 isr_dispatch_func UscIsrTable[7] =
851 {
852 mgsl_isr_null,
853 mgsl_isr_misc,
854 mgsl_isr_io_pin,
855 mgsl_isr_transmit_data,
856 mgsl_isr_transmit_status,
857 mgsl_isr_receive_data,
858 mgsl_isr_receive_status
859 };
860
861 /*
862 * ioctl call handlers
863 */
864 static int set_modem_info(struct mgsl_struct * info, unsigned int cmd,
865 unsigned int *value);
866 static int get_modem_info(struct mgsl_struct * info, unsigned int *value);
867 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
868 *user_icount);
869 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS *user_params);
870 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS *new_params);
871 static int mgsl_get_txidle(struct mgsl_struct * info, int*idle_mode);
872 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
873 static int mgsl_txenable(struct mgsl_struct * info, int enable);
874 static int mgsl_txabort(struct mgsl_struct * info);
875 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
876 static int mgsl_wait_event(struct mgsl_struct * info, int * mask);
877 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
878
879 #define jiffies_from_ms(a) ((((a) * HZ)/1000)+1)
880
881 /*
882 * Global linked list of SyncLink devices
883 */
884 struct mgsl_struct *mgsl_device_list;
885 static int mgsl_device_count;
886
887 /*
888 * Set this param to non-zero to load eax with the
889 * .text section address and breakpoint on module load.
890 * This is useful when using gdb and the add-symbol-file command.
891 */
892 static int break_on_load;
893
894 /*
895 * Driver major number, defaults to zero to get an auto-assigned
896 * major number. May be overridden as a module parameter.
897 */
898 static int ttymajor;
899
900 static int cuamajor;
901
902 /*
903 * Array of user-specified options for ISA adapters.
904 */
905 static int io[MAX_ISA_DEVICES];
906 static int irq[MAX_ISA_DEVICES];
907 static int dma[MAX_ISA_DEVICES];
908 static int debug_level;
909 static int maxframe[MAX_TOTAL_DEVICES];
910 static int dosyncppp[MAX_TOTAL_DEVICES];
911 static int txdmabufs[MAX_TOTAL_DEVICES];
912 static int txholdbufs[MAX_TOTAL_DEVICES];
913
914 MODULE_PARM(break_on_load,"i");
915 MODULE_PARM(ttymajor,"i");
916 MODULE_PARM(cuamajor,"i");
917 MODULE_PARM(io,"1-" __MODULE_STRING(MAX_ISA_DEVICES) "i");
918 MODULE_PARM(irq,"1-" __MODULE_STRING(MAX_ISA_DEVICES) "i");
919 MODULE_PARM(dma,"1-" __MODULE_STRING(MAX_ISA_DEVICES) "i");
920 MODULE_PARM(debug_level,"i");
921 MODULE_PARM(maxframe,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i");
922 MODULE_PARM(dosyncppp,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i");
923 MODULE_PARM(txdmabufs,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i");
924 MODULE_PARM(txholdbufs,"1-" __MODULE_STRING(MAX_TOTAL_DEVICES) "i");
925
926 static char *driver_name = "SyncLink serial driver";
927 static char *driver_version = "$Revision: 3.21 $";
928
929 static int __init synclink_init_one (struct pci_dev *dev,
930 const struct pci_device_id *ent);
931 static void __devexit synclink_remove_one (struct pci_dev *dev);
932
933 static struct pci_device_id synclink_pci_tbl[] __devinitdata = {
934 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
935 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
936 { 0, }, /* terminate list */
937 };
938 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
939
940 #ifdef MODULE_LICENSE
941 MODULE_LICENSE("GPL");
942 #endif
943
944 static struct pci_driver synclink_pci_driver = {
945 name: "synclink",
946 id_table: synclink_pci_tbl,
947 probe: synclink_init_one,
948 remove: __devexit_p(synclink_remove_one),
949 };
950
951 static struct tty_driver serial_driver, callout_driver;
952 static int serial_refcount;
953
954 /* number of characters left in xmit buffer before we ask for more */
955 #define WAKEUP_CHARS 256
956
957
958 static void mgsl_change_params(struct mgsl_struct *info);
959 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
960
961 static struct tty_struct *serial_table[MAX_TOTAL_DEVICES];
962 static struct termios *serial_termios[MAX_TOTAL_DEVICES];
963 static struct termios *serial_termios_locked[MAX_TOTAL_DEVICES];
964
965 #ifndef MIN
966 #define MIN(a,b) ((a) < (b) ? (a) : (b))
967 #endif
968
969 /*
970 * 1st function defined in .text section. Calling this function in
971 * init_module() followed by a breakpoint allows a remote debugger
972 * (gdb) to get the .text address for the add-symbol-file command.
973 * This allows remote debugging of dynamically loadable modules.
974 */
975 void* mgsl_get_text_ptr(void);
976 void* mgsl_get_text_ptr() {return mgsl_get_text_ptr;}
977
978 /*
979 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
980 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
981 * and some other program tries to do a serial write at the same time.
982 * Since the lock will only come under contention when the system is
983 * swapping and available memory is low, it makes sense to share one
984 * buffer across all the serial ioports, since it significantly saves
985 * memory if large numbers of serial ports are open.
986 */
987 static unsigned char *tmp_buf;
988 static DECLARE_MUTEX(tmp_buf_sem);
989
990 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
991 kdev_t device, const char *routine)
992 {
993 #ifdef MGSL_PARANOIA_CHECK
994 static const char *badmagic =
995 "Warning: bad magic number for mgsl struct (%s) in %s\n";
996 static const char *badinfo =
997 "Warning: null mgsl_struct for (%s) in %s\n";
998
999 if (!info) {
1000 printk(badinfo, kdevname(device), routine);
1001 return 1;
1002 }
1003 if (info->magic != MGSL_MAGIC) {
1004 printk(badmagic, kdevname(device), routine);
1005 return 1;
1006 }
1007 #else
1008 if (!info)
1009 return 1;
1010 #endif
1011 return 0;
1012 }
1013
1014 /**
1015 * line discipline callback wrappers
1016 *
1017 * The wrappers maintain line discipline references
1018 * while calling into the line discipline.
1019 *
1020 * ldisc_receive_buf - pass receive data to line discipline
1021 */
1022
1023 static void ldisc_receive_buf(struct tty_struct *tty,
1024 const __u8 *data, char *flags, int count)
1025 {
1026 struct tty_ldisc *ld;
1027 if (!tty)
1028 return;
1029 ld = tty_ldisc_ref(tty);
1030 if (ld) {
1031 if (ld->receive_buf)
1032 ld->receive_buf(tty, data, flags, count);
1033 tty_ldisc_deref(ld);
1034 }
1035 }
1036
1037 /* mgsl_stop() throttle (stop) transmitter
1038 *
1039 * Arguments: tty pointer to tty info structure
1040 * Return Value: None
1041 */
1042 static void mgsl_stop(struct tty_struct *tty)
1043 {
1044 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1045 unsigned long flags;
1046
1047 if (mgsl_paranoia_check(info, tty->device, "mgsl_stop"))
1048 return;
1049
1050 if ( debug_level >= DEBUG_LEVEL_INFO )
1051 printk("mgsl_stop(%s)\n",info->device_name);
1052
1053 spin_lock_irqsave(&info->irq_spinlock,flags);
1054 if (info->tx_enabled)
1055 usc_stop_transmitter(info);
1056 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1057
1058 } /* end of mgsl_stop() */
1059
1060 /* mgsl_start() release (start) transmitter
1061 *
1062 * Arguments: tty pointer to tty info structure
1063 * Return Value: None
1064 */
1065 static void mgsl_start(struct tty_struct *tty)
1066 {
1067 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1068 unsigned long flags;
1069
1070 if (mgsl_paranoia_check(info, tty->device, "mgsl_start"))
1071 return;
1072
1073 if ( debug_level >= DEBUG_LEVEL_INFO )
1074 printk("mgsl_start(%s)\n",info->device_name);
1075
1076 spin_lock_irqsave(&info->irq_spinlock,flags);
1077 if (!info->tx_enabled)
1078 usc_start_transmitter(info);
1079 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1080
1081 } /* end of mgsl_start() */
1082
1083 /*
1084 * Bottom half work queue access functions
1085 */
1086
1087 /* mgsl_bh_action() Return next bottom half action to perform.
1088 * Return Value: BH action code or 0 if nothing to do.
1089 */
1090 int mgsl_bh_action(struct mgsl_struct *info)
1091 {
1092 unsigned long flags;
1093 int rc = 0;
1094
1095 spin_lock_irqsave(&info->irq_spinlock,flags);
1096
1097 if (info->pending_bh & BH_RECEIVE) {
1098 info->pending_bh &= ~BH_RECEIVE;
1099 rc = BH_RECEIVE;
1100 } else if (info->pending_bh & BH_TRANSMIT) {
1101 info->pending_bh &= ~BH_TRANSMIT;
1102 rc = BH_TRANSMIT;
1103 } else if (info->pending_bh & BH_STATUS) {
1104 info->pending_bh &= ~BH_STATUS;
1105 rc = BH_STATUS;
1106 }
1107
1108 if (!rc) {
1109 /* Mark BH routine as complete */
1110 info->bh_running = 0;
1111 info->bh_requested = 0;
1112 }
1113
1114 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1115
1116 return rc;
1117 }
1118
1119 /*
1120 * Perform bottom half processing of work items queued by ISR.
1121 */
1122 void mgsl_bh_handler(void* Context)
1123 {
1124 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1125 int action;
1126
1127 if (!info)
1128 return;
1129
1130 if ( debug_level >= DEBUG_LEVEL_BH )
1131 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1132 __FILE__,__LINE__,info->device_name);
1133
1134 info->bh_running = 1;
1135
1136 while((action = mgsl_bh_action(info)) != 0) {
1137
1138 /* Process work item */
1139 if ( debug_level >= DEBUG_LEVEL_BH )
1140 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1141 __FILE__,__LINE__,action);
1142
1143 switch (action) {
1144
1145 case BH_RECEIVE:
1146 mgsl_bh_receive(info);
1147 break;
1148 case BH_TRANSMIT:
1149 mgsl_bh_transmit(info);
1150 break;
1151 case BH_STATUS:
1152 mgsl_bh_status(info);
1153 break;
1154 default:
1155 /* unknown work item ID */
1156 printk("Unknown work item ID=%08X!\n", action);
1157 break;
1158 }
1159 }
1160
1161 if ( debug_level >= DEBUG_LEVEL_BH )
1162 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1163 __FILE__,__LINE__,info->device_name);
1164 }
1165
1166 void mgsl_bh_receive(struct mgsl_struct *info)
1167 {
1168 int (*get_rx_frame)(struct mgsl_struct *info) =
1169 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1170
1171 if ( debug_level >= DEBUG_LEVEL_BH )
1172 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1173 __FILE__,__LINE__,info->device_name);
1174 do
1175 {
1176 if (info->rx_rcc_underrun) {
1177 unsigned long flags;
1178 spin_lock_irqsave(&info->irq_spinlock,flags);
1179 usc_start_receiver(info);
1180 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1181 return;
1182 }
1183 } while(get_rx_frame(info));
1184 }
1185
1186 void mgsl_bh_transmit(struct mgsl_struct *info)
1187 {
1188 struct tty_struct *tty = info->tty;
1189 unsigned long flags;
1190
1191 if ( debug_level >= DEBUG_LEVEL_BH )
1192 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1193 __FILE__,__LINE__,info->device_name);
1194
1195 if (tty) {
1196 tty_wakeup(tty);
1197 }
1198
1199 /* if transmitter idle and loopmode_send_done_requested
1200 * then start echoing RxD to TxD
1201 */
1202 spin_lock_irqsave(&info->irq_spinlock,flags);
1203 if ( !info->tx_active && info->loopmode_send_done_requested )
1204 usc_loopmode_send_done( info );
1205 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1206 }
1207
1208 void mgsl_bh_status(struct mgsl_struct *info)
1209 {
1210 if ( debug_level >= DEBUG_LEVEL_BH )
1211 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1212 __FILE__,__LINE__,info->device_name);
1213
1214 info->ri_chkcount = 0;
1215 info->dsr_chkcount = 0;
1216 info->dcd_chkcount = 0;
1217 info->cts_chkcount = 0;
1218 }
1219
1220 /* mgsl_isr_receive_status()
1221 *
1222 * Service a receive status interrupt. The type of status
1223 * interrupt is indicated by the state of the RCSR.
1224 * This is only used for HDLC mode.
1225 *
1226 * Arguments: info pointer to device instance data
1227 * Return Value: None
1228 */
1229 void mgsl_isr_receive_status( struct mgsl_struct *info )
1230 {
1231 u16 status = usc_InReg( info, RCSR );
1232
1233 if ( debug_level >= DEBUG_LEVEL_ISR )
1234 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1235 __FILE__,__LINE__,status);
1236
1237 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1238 info->loopmode_insert_requested &&
1239 usc_loopmode_active(info) )
1240 {
1241 ++info->icount.rxabort;
1242 info->loopmode_insert_requested = FALSE;
1243
1244 /* clear CMR:13 to start echoing RxD to TxD */
1245 info->cmr_value &= ~BIT13;
1246 usc_OutReg(info, CMR, info->cmr_value);
1247
1248 /* disable received abort irq (no longer required) */
1249 usc_OutReg(info, RICR,
1250 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1251 }
1252
1253 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1254 if (status & RXSTATUS_EXITED_HUNT)
1255 info->icount.exithunt++;
1256 if (status & RXSTATUS_IDLE_RECEIVED)
1257 info->icount.rxidle++;
1258 wake_up_interruptible(&info->event_wait_q);
1259 }
1260
1261 if (status & RXSTATUS_OVERRUN){
1262 info->icount.rxover++;
1263 usc_process_rxoverrun_sync( info );
1264 }
1265
1266 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1267 usc_UnlatchRxstatusBits( info, status );
1268
1269 } /* end of mgsl_isr_receive_status() */
1270
1271 /* mgsl_isr_transmit_status()
1272 *
1273 * Service a transmit status interrupt
1274 * HDLC mode: end of transmit frame
1275 * Async mode: all data is sent
1276 * transmit status is indicated by bits in the TCSR.
1277 *
1278 * Arguments: info pointer to device instance data
1279 * Return Value: None
1280 */
1281 void mgsl_isr_transmit_status( struct mgsl_struct *info )
1282 {
1283 u16 status = usc_InReg( info, TCSR );
1284
1285 if ( debug_level >= DEBUG_LEVEL_ISR )
1286 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1287 __FILE__,__LINE__,status);
1288
1289 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1290 usc_UnlatchTxstatusBits( info, status );
1291
1292 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1293 {
1294 /* finished sending HDLC abort. This may leave */
1295 /* the TxFifo with data from the aborted frame */
1296 /* so purge the TxFifo. Also shut down the DMA */
1297 /* channel in case there is data remaining in */
1298 /* the DMA buffer */
1299 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1300 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1301 }
1302
1303 if ( status & TXSTATUS_EOF_SENT )
1304 info->icount.txok++;
1305 else if ( status & TXSTATUS_UNDERRUN )
1306 info->icount.txunder++;
1307 else if ( status & TXSTATUS_ABORT_SENT )
1308 info->icount.txabort++;
1309 else
1310 info->icount.txunder++;
1311
1312 info->tx_active = 0;
1313 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1314 del_timer(&info->tx_timer);
1315
1316 if ( info->drop_rts_on_tx_done ) {
1317 usc_get_serial_signals( info );
1318 if ( info->serial_signals & SerialSignal_RTS ) {
1319 info->serial_signals &= ~SerialSignal_RTS;
1320 usc_set_serial_signals( info );
1321 }
1322 info->drop_rts_on_tx_done = 0;
1323 }
1324
1325 #ifdef CONFIG_SYNCLINK_SYNCPPP
1326 if (info->netcount)
1327 mgsl_sppp_tx_done(info);
1328 else
1329 #endif
1330 {
1331 if (info->tty->stopped || info->tty->hw_stopped) {
1332 usc_stop_transmitter(info);
1333 return;
1334 }
1335 info->pending_bh |= BH_TRANSMIT;
1336 }
1337
1338 } /* end of mgsl_isr_transmit_status() */
1339
1340 /* mgsl_isr_io_pin()
1341 *
1342 * Service an Input/Output pin interrupt. The type of
1343 * interrupt is indicated by bits in the MISR
1344 *
1345 * Arguments: info pointer to device instance data
1346 * Return Value: None
1347 */
1348 void mgsl_isr_io_pin( struct mgsl_struct *info )
1349 {
1350 struct mgsl_icount *icount;
1351 u16 status = usc_InReg( info, MISR );
1352
1353 if ( debug_level >= DEBUG_LEVEL_ISR )
1354 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1355 __FILE__,__LINE__,status);
1356
1357 usc_ClearIrqPendingBits( info, IO_PIN );
1358 usc_UnlatchIostatusBits( info, status );
1359
1360 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1361 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1362 icount = &info->icount;
1363 /* update input line counters */
1364 if (status & MISCSTATUS_RI_LATCHED) {
1365 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1366 usc_DisablestatusIrqs(info,SICR_RI);
1367 icount->rng++;
1368 if ( status & MISCSTATUS_RI )
1369 info->input_signal_events.ri_up++;
1370 else
1371 info->input_signal_events.ri_down++;
1372 }
1373 if (status & MISCSTATUS_DSR_LATCHED) {
1374 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1375 usc_DisablestatusIrqs(info,SICR_DSR);
1376 icount->dsr++;
1377 if ( status & MISCSTATUS_DSR )
1378 info->input_signal_events.dsr_up++;
1379 else
1380 info->input_signal_events.dsr_down++;
1381 }
1382 if (status & MISCSTATUS_DCD_LATCHED) {
1383 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1384 usc_DisablestatusIrqs(info,SICR_DCD);
1385 icount->dcd++;
1386 if (status & MISCSTATUS_DCD) {
1387 info->input_signal_events.dcd_up++;
1388 #ifdef CONFIG_SYNCLINK_SYNCPPP
1389 if (info->netcount)
1390 sppp_reopen(info->netdev);
1391 #endif
1392 } else
1393 info->input_signal_events.dcd_down++;
1394 }
1395 if (status & MISCSTATUS_CTS_LATCHED)
1396 {
1397 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1398 usc_DisablestatusIrqs(info,SICR_CTS);
1399 icount->cts++;
1400 if ( status & MISCSTATUS_CTS )
1401 info->input_signal_events.cts_up++;
1402 else
1403 info->input_signal_events.cts_down++;
1404 }
1405 wake_up_interruptible(&info->status_event_wait_q);
1406 wake_up_interruptible(&info->event_wait_q);
1407
1408 if ( (info->flags & ASYNC_CHECK_CD) &&
1409 (status & MISCSTATUS_DCD_LATCHED) ) {
1410 if ( debug_level >= DEBUG_LEVEL_ISR )
1411 printk("%s CD now %s...", info->device_name,
1412 (status & MISCSTATUS_DCD) ? "on" : "off");
1413 if (status & MISCSTATUS_DCD)
1414 wake_up_interruptible(&info->open_wait);
1415 else if (!((info->flags & ASYNC_CALLOUT_ACTIVE) &&
1416 (info->flags & ASYNC_CALLOUT_NOHUP))) {
1417 if ( debug_level >= DEBUG_LEVEL_ISR )
1418 printk("doing serial hangup...");
1419 if (info->tty)
1420 tty_hangup(info->tty);
1421 }
1422 }
1423
1424 if ( (info->flags & ASYNC_CTS_FLOW) &&
1425 (status & MISCSTATUS_CTS_LATCHED) ) {
1426 if (info->tty->hw_stopped) {
1427 if (status & MISCSTATUS_CTS) {
1428 if ( debug_level >= DEBUG_LEVEL_ISR )
1429 printk("CTS tx start...");
1430 if (info->tty)
1431 info->tty->hw_stopped = 0;
1432 usc_start_transmitter(info);
1433 info->pending_bh |= BH_TRANSMIT;
1434 return;
1435 }
1436 } else {
1437 if (!(status & MISCSTATUS_CTS)) {
1438 if ( debug_level >= DEBUG_LEVEL_ISR )
1439 printk("CTS tx stop...");
1440 if (info->tty)
1441 info->tty->hw_stopped = 1;
1442 usc_stop_transmitter(info);
1443 }
1444 }
1445 }
1446 }
1447
1448 info->pending_bh |= BH_STATUS;
1449
1450 /* for diagnostics set IRQ flag */
1451 if ( status & MISCSTATUS_TXC_LATCHED ){
1452 usc_OutReg( info, SICR,
1453 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1454 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1455 info->irq_occurred = 1;
1456 }
1457
1458 } /* end of mgsl_isr_io_pin() */
1459
1460 /* mgsl_isr_transmit_data()
1461 *
1462 * Service a transmit data interrupt (async mode only).
1463 *
1464 * Arguments: info pointer to device instance data
1465 * Return Value: None
1466 */
1467 void mgsl_isr_transmit_data( struct mgsl_struct *info )
1468 {
1469 if ( debug_level >= DEBUG_LEVEL_ISR )
1470 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1471 __FILE__,__LINE__,info->xmit_cnt);
1472
1473 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1474
1475 if (info->tty->stopped || info->tty->hw_stopped) {
1476 usc_stop_transmitter(info);
1477 return;
1478 }
1479
1480 if ( info->xmit_cnt )
1481 usc_load_txfifo( info );
1482 else
1483 info->tx_active = 0;
1484
1485 if (info->xmit_cnt < WAKEUP_CHARS)
1486 info->pending_bh |= BH_TRANSMIT;
1487
1488 } /* end of mgsl_isr_transmit_data() */
1489
1490 /* mgsl_isr_receive_data()
1491 *
1492 * Service a receive data interrupt. This occurs
1493 * when operating in asynchronous interrupt transfer mode.
1494 * The receive data FIFO is flushed to the receive data buffers.
1495 *
1496 * Arguments: info pointer to device instance data
1497 * Return Value: None
1498 */
1499 void mgsl_isr_receive_data( struct mgsl_struct *info )
1500 {
1501 int Fifocount;
1502 u16 status;
1503 unsigned char DataByte;
1504 struct tty_struct *tty = info->tty;
1505 struct mgsl_icount *icount = &info->icount;
1506
1507 if ( debug_level >= DEBUG_LEVEL_ISR )
1508 printk("%s(%d):mgsl_isr_receive_data\n",
1509 __FILE__,__LINE__);
1510
1511 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1512
1513 /* select FIFO status for RICR readback */
1514 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1515
1516 /* clear the Wordstatus bit so that status readback */
1517 /* only reflects the status of this byte */
1518 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1519
1520 /* flush the receive FIFO */
1521
1522 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1523 /* read one byte from RxFIFO */
1524 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1525 info->io_base + CCAR );
1526 DataByte = inb( info->io_base + CCAR );
1527
1528 /* get the status of the received byte */
1529 status = usc_InReg(info, RCSR);
1530 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1531 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1532 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1533
1534 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
1535 continue;
1536
1537 *tty->flip.char_buf_ptr = DataByte;
1538 icount->rx++;
1539
1540 *tty->flip.flag_buf_ptr = 0;
1541 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1542 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1543 printk("rxerr=%04X\n",status);
1544 /* update error statistics */
1545 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1546 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1547 icount->brk++;
1548 } else if (status & RXSTATUS_PARITY_ERROR)
1549 icount->parity++;
1550 else if (status & RXSTATUS_FRAMING_ERROR)
1551 icount->frame++;
1552 else if (status & RXSTATUS_OVERRUN) {
1553 /* must issue purge fifo cmd before */
1554 /* 16C32 accepts more receive chars */
1555 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1556 icount->overrun++;
1557 }
1558
1559 /* discard char if tty control flags say so */
1560 if (status & info->ignore_status_mask)
1561 continue;
1562
1563 status &= info->read_status_mask;
1564
1565 if (status & RXSTATUS_BREAK_RECEIVED) {
1566 *tty->flip.flag_buf_ptr = TTY_BREAK;
1567 if (info->flags & ASYNC_SAK)
1568 do_SAK(tty);
1569 } else if (status & RXSTATUS_PARITY_ERROR)
1570 *tty->flip.flag_buf_ptr = TTY_PARITY;
1571 else if (status & RXSTATUS_FRAMING_ERROR)
1572 *tty->flip.flag_buf_ptr = TTY_FRAME;
1573 if (status & RXSTATUS_OVERRUN) {
1574 /* Overrun is special, since it's
1575 * reported immediately, and doesn't
1576 * affect the current character
1577 */
1578 if (tty->flip.count < TTY_FLIPBUF_SIZE) {
1579 tty->flip.count++;
1580 tty->flip.flag_buf_ptr++;
1581 tty->flip.char_buf_ptr++;
1582 *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1583 }
1584 }
1585 } /* end of if (error) */
1586
1587 tty->flip.flag_buf_ptr++;
1588 tty->flip.char_buf_ptr++;
1589 tty->flip.count++;
1590 }
1591
1592 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1593 printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
1594 __FILE__,__LINE__,tty->flip.count);
1595 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1596 __FILE__,__LINE__,icount->rx,icount->brk,
1597 icount->parity,icount->frame,icount->overrun);
1598 }
1599
1600 if ( tty->flip.count )
1601 tty_flip_buffer_push(tty);
1602 }
1603
1604 /* mgsl_isr_misc()
1605 *
1606 * Service a miscellaneous interrupt source.
1607 *
1608 * Arguments: info pointer to device extension (instance data)
1609 * Return Value: None
1610 */
1611 void mgsl_isr_misc( struct mgsl_struct *info )
1612 {
1613 u16 status = usc_InReg( info, MISR );
1614
1615 if ( debug_level >= DEBUG_LEVEL_ISR )
1616 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1617 __FILE__,__LINE__,status);
1618
1619 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1620 (info->params.mode == MGSL_MODE_HDLC)) {
1621
1622 /* turn off receiver and rx DMA */
1623 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1624 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1625 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1626 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1627 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1628
1629 /* schedule BH handler to restart receiver */
1630 info->pending_bh |= BH_RECEIVE;
1631 info->rx_rcc_underrun = 1;
1632 }
1633
1634 usc_ClearIrqPendingBits( info, MISC );
1635 usc_UnlatchMiscstatusBits( info, status );
1636
1637 } /* end of mgsl_isr_misc() */
1638
1639 /* mgsl_isr_null()
1640 *
1641 * Services undefined interrupt vectors from the
1642 * USC. (hence this function SHOULD never be called)
1643 *
1644 * Arguments: info pointer to device extension (instance data)
1645 * Return Value: None
1646 */
1647 void mgsl_isr_null( struct mgsl_struct *info )
1648 {
1649
1650 } /* end of mgsl_isr_null() */
1651
1652 /* mgsl_isr_receive_dma()
1653 *
1654 * Service a receive DMA channel interrupt.
1655 * For this driver there are two sources of receive DMA interrupts
1656 * as identified in the Receive DMA mode Register (RDMR):
1657 *
1658 * BIT3 EOA/EOL End of List, all receive buffers in receive
1659 * buffer list have been filled (no more free buffers
1660 * available). The DMA controller has shut down.
1661 *
1662 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1663 * DMA buffer is terminated in response to completion
1664 * of a good frame or a frame with errors. The status
1665 * of the frame is stored in the buffer entry in the
1666 * list of receive buffer entries.
1667 *
1668 * Arguments: info pointer to device instance data
1669 * Return Value: None
1670 */
1671 void mgsl_isr_receive_dma( struct mgsl_struct *info )
1672 {
1673 u16 status;
1674
1675 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1676 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1677
1678 /* Read the receive DMA status to identify interrupt type. */
1679 /* This also clears the status bits. */
1680 status = usc_InDmaReg( info, RDMR );
1681
1682 if ( debug_level >= DEBUG_LEVEL_ISR )
1683 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1684 __FILE__,__LINE__,info->device_name,status);
1685
1686 info->pending_bh |= BH_RECEIVE;
1687
1688 if ( status & BIT3 ) {
1689 info->rx_overflow = 1;
1690 info->icount.buf_overrun++;
1691 }
1692
1693 } /* end of mgsl_isr_receive_dma() */
1694
1695 /* mgsl_isr_transmit_dma()
1696 *
1697 * This function services a transmit DMA channel interrupt.
1698 *
1699 * For this driver there is one source of transmit DMA interrupts
1700 * as identified in the Transmit DMA Mode Register (TDMR):
1701 *
1702 * BIT2 EOB End of Buffer. This interrupt occurs when a
1703 * transmit DMA buffer has been emptied.
1704 *
1705 * The driver maintains enough transmit DMA buffers to hold at least
1706 * one max frame size transmit frame. When operating in a buffered
1707 * transmit mode, there may be enough transmit DMA buffers to hold at
1708 * least two or more max frame size frames. On an EOB condition,
1709 * determine if there are any queued transmit buffers and copy into
1710 * transmit DMA buffers if we have room.
1711 *
1712 * Arguments: info pointer to device instance data
1713 * Return Value: None
1714 */
1715 void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1716 {
1717 u16 status;
1718
1719 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1720 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1721
1722 /* Read the transmit DMA status to identify interrupt type. */
1723 /* This also clears the status bits. */
1724
1725 status = usc_InDmaReg( info, TDMR );
1726
1727 if ( debug_level >= DEBUG_LEVEL_ISR )
1728 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1729 __FILE__,__LINE__,info->device_name,status);
1730
1731 if ( status & BIT2 ) {
1732 --info->tx_dma_buffers_used;
1733
1734 /* if there are transmit frames queued,
1735 * try to load the next one
1736 */
1737 if ( load_next_tx_holding_buffer(info) ) {
1738 /* if call returns non-zero value, we have
1739 * at least one free tx holding buffer
1740 */
1741 info->pending_bh |= BH_TRANSMIT;
1742 }
1743 }
1744
1745 } /* end of mgsl_isr_transmit_dma() */
1746
1747 /* mgsl_interrupt()
1748 *
1749 * Interrupt service routine entry point.
1750 *
1751 * Arguments:
1752 *
1753 * irq interrupt number that caused interrupt
1754 * dev_id device ID supplied during interrupt registration
1755 * regs interrupted processor context
1756 *
1757 * Return Value: None
1758 */
1759 static void mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1760 {
1761 struct mgsl_struct * info;
1762 u16 UscVector;
1763 u16 DmaVector;
1764
1765 if ( debug_level >= DEBUG_LEVEL_ISR )
1766 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1767 __FILE__,__LINE__,irq);
1768
1769 info = (struct mgsl_struct *)dev_id;
1770 if (!info)
1771 return;
1772
1773 spin_lock(&info->irq_spinlock);
1774
1775 for(;;) {
1776 /* Read the interrupt vectors from hardware. */
1777 UscVector = usc_InReg(info, IVR) >> 9;
1778 DmaVector = usc_InDmaReg(info, DIVR);
1779
1780 if ( debug_level >= DEBUG_LEVEL_ISR )
1781 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1782 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1783
1784 if ( !UscVector && !DmaVector )
1785 break;
1786
1787 /* Dispatch interrupt vector */
1788 if ( UscVector )
1789 (*UscIsrTable[UscVector])(info);
1790 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1791 mgsl_isr_transmit_dma(info);
1792 else
1793 mgsl_isr_receive_dma(info);
1794
1795 if ( info->isr_overflow ) {
1796 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1797 __FILE__,__LINE__,info->device_name, irq);
1798 usc_DisableMasterIrqBit(info);
1799 usc_DisableDmaInterrupts(info,DICR_MASTER);
1800 break;
1801 }
1802 }
1803
1804 /* Request bottom half processing if there's something
1805 * for it to do and the bh is not already running
1806 */
1807
1808 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1809 if ( debug_level >= DEBUG_LEVEL_ISR )
1810 printk("%s(%d):%s queueing bh task.\n",
1811 __FILE__,__LINE__,info->device_name);
1812 queue_task(&info->task, &tq_immediate);
1813 mark_bh(IMMEDIATE_BH);
1814 info->bh_requested = 1;
1815 }
1816
1817 spin_unlock(&info->irq_spinlock);
1818
1819 if ( debug_level >= DEBUG_LEVEL_ISR )
1820 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1821 __FILE__,__LINE__,irq);
1822
1823 } /* end of mgsl_interrupt() */
1824
1825 /* startup()
1826 *
1827 * Initialize and start device.
1828 *
1829 * Arguments: info pointer to device instance data
1830 * Return Value: 0 if success, otherwise error code
1831 */
1832 static int startup(struct mgsl_struct * info)
1833 {
1834 int retval = 0;
1835
1836 if ( debug_level >= DEBUG_LEVEL_INFO )
1837 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1838
1839 if (info->flags & ASYNC_INITIALIZED)
1840 return 0;
1841
1842 if (!info->xmit_buf) {
1843 /* allocate a page of memory for a transmit buffer */
1844 info->xmit_buf = (unsigned char *)get_free_page(GFP_KERNEL);
1845 if (!info->xmit_buf) {
1846 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1847 __FILE__,__LINE__,info->device_name);
1848 return -ENOMEM;
1849 }
1850 }
1851
1852 info->pending_bh = 0;
1853
1854 init_timer(&info->tx_timer);
1855 info->tx_timer.data = (unsigned long)info;
1856 info->tx_timer.function = mgsl_tx_timeout;
1857
1858 /* Allocate and claim adapter resources */
1859 retval = mgsl_claim_resources(info);
1860
1861 /* perform existence check and diagnostics */
1862 if ( !retval )
1863 retval = mgsl_adapter_test(info);
1864
1865 if ( retval ) {
1866 if (capable(CAP_SYS_ADMIN) && info->tty)
1867 set_bit(TTY_IO_ERROR, &info->tty->flags);
1868 mgsl_release_resources(info);
1869 return retval;
1870 }
1871
1872 /* program hardware for current parameters */
1873 mgsl_change_params(info);
1874
1875 if (info->tty)
1876 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1877
1878 info->flags |= ASYNC_INITIALIZED;
1879
1880 return 0;
1881
1882 } /* end of startup() */
1883
1884 /* shutdown()
1885 *
1886 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1887 *
1888 * Arguments: info pointer to device instance data
1889 * Return Value: None
1890 */
1891 static void shutdown(struct mgsl_struct * info)
1892 {
1893 unsigned long flags;
1894
1895 if (!(info->flags & ASYNC_INITIALIZED))
1896 return;
1897
1898 if (debug_level >= DEBUG_LEVEL_INFO)
1899 printk("%s(%d):mgsl_shutdown(%s)\n",
1900 __FILE__,__LINE__, info->device_name );
1901
1902 /* clear status wait queue because status changes */
1903 /* can't happen after shutting down the hardware */
1904 wake_up_interruptible(&info->status_event_wait_q);
1905 wake_up_interruptible(&info->event_wait_q);
1906
1907 del_timer(&info->tx_timer);
1908
1909 if (info->xmit_buf) {
1910 free_page((unsigned long) info->xmit_buf);
1911 info->xmit_buf = 0;
1912 }
1913
1914 spin_lock_irqsave(&info->irq_spinlock,flags);
1915 usc_DisableMasterIrqBit(info);
1916 usc_stop_receiver(info);
1917 usc_stop_transmitter(info);
1918 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1919 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1920 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1921
1922 /* Disable DMAEN (Port 7, Bit 14) */
1923 /* This disconnects the DMA request signal from the ISA bus */
1924 /* on the ISA adapter. This has no effect for the PCI adapter */
1925 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1926
1927 /* Disable INTEN (Port 6, Bit12) */
1928 /* This disconnects the IRQ request signal to the ISA bus */
1929 /* on the ISA adapter. This has no effect for the PCI adapter */
1930 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1931
1932 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1933 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1934 usc_set_serial_signals(info);
1935 }
1936
1937 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1938
1939 mgsl_release_resources(info);
1940
1941 if (info->tty)
1942 set_bit(TTY_IO_ERROR, &info->tty->flags);
1943
1944 info->flags &= ~ASYNC_INITIALIZED;
1945
1946 } /* end of shutdown() */
1947
1948 static void mgsl_program_hw(struct mgsl_struct *info)
1949 {
1950 unsigned long flags;
1951
1952 spin_lock_irqsave(&info->irq_spinlock,flags);
1953
1954 usc_stop_receiver(info);
1955 usc_stop_transmitter(info);
1956 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1957
1958 if (info->params.mode == MGSL_MODE_HDLC ||
1959 info->params.mode == MGSL_MODE_RAW ||
1960 info->netcount)
1961 usc_set_sync_mode(info);
1962 else
1963 usc_set_async_mode(info);
1964
1965 usc_set_serial_signals(info);
1966
1967 info->dcd_chkcount = 0;
1968 info->cts_chkcount = 0;
1969 info->ri_chkcount = 0;
1970 info->dsr_chkcount = 0;
1971
1972 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1973 usc_EnableInterrupts(info, IO_PIN);
1974 usc_get_serial_signals(info);
1975
1976 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1977 usc_start_receiver(info);
1978
1979 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1980 }
1981
1982 /* Reconfigure adapter based on new parameters
1983 */
1984 static void mgsl_change_params(struct mgsl_struct *info)
1985 {
1986 unsigned cflag;
1987 int bits_per_char;
1988
1989 if (!info->tty || !info->tty->termios)
1990 return;
1991
1992 if (debug_level >= DEBUG_LEVEL_INFO)
1993 printk("%s(%d):mgsl_change_params(%s)\n",
1994 __FILE__,__LINE__, info->device_name );
1995
1996 cflag = info->tty->termios->c_cflag;
1997
1998 /* if B0 rate (hangup) specified then negate DTR and RTS */
1999 /* otherwise assert DTR and RTS */
2000 if (cflag & CBAUD)
2001 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
2002 else
2003 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
2004
2005 /* byte size and parity */
2006
2007 switch (cflag & CSIZE) {
2008 case CS5: info->params.data_bits = 5; break;
2009 case CS6: info->params.data_bits = 6; break;
2010 case CS7: info->params.data_bits = 7; break;
2011 case CS8: info->params.data_bits = 8; break;
2012 /* Never happens, but GCC is too dumb to figure it out */
2013 default: info->params.data_bits = 7; break;
2014 }
2015
2016 if (cflag & CSTOPB)
2017 info->params.stop_bits = 2;
2018 else
2019 info->params.stop_bits = 1;
2020
2021 info->params.parity = ASYNC_PARITY_NONE;
2022 if (cflag & PARENB) {
2023 if (cflag & PARODD)
2024 info->params.parity = ASYNC_PARITY_ODD;
2025 else
2026 info->params.parity = ASYNC_PARITY_EVEN;
2027 #ifdef CMSPAR
2028 if (cflag & CMSPAR)
2029 info->params.parity = ASYNC_PARITY_SPACE;
2030 #endif
2031 }
2032
2033 /* calculate number of jiffies to transmit a full
2034 * FIFO (32 bytes) at specified data rate
2035 */
2036 bits_per_char = info->params.data_bits +
2037 info->params.stop_bits + 1;
2038
2039 /* if port data rate is set to 460800 or less then
2040 * allow tty settings to override, otherwise keep the
2041 * current data rate.
2042 */
2043 if (info->params.data_rate <= 460800)
2044 info->params.data_rate = tty_get_baud_rate(info->tty);
2045
2046 if ( info->params.data_rate ) {
2047 info->timeout = (32*HZ*bits_per_char) /
2048 info->params.data_rate;
2049 }
2050 info->timeout += HZ/50; /* Add .02 seconds of slop */
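	/* Worked example (illustrative only, assuming HZ=100): at 9600 bps
	 * with 8 data bits and 1 stop bit, bits_per_char = 8 + 1 + 1 = 10,
	 * so a full 32-byte FIFO takes 32*10/9600 = 1/30 second.
	 *
	 *	timeout = 32*100*10/9600 = 3 jiffies
	 *	timeout += HZ/50        = 3 + 2 = 5 jiffies (~50ms)
	 */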
2051
2052 if (cflag & CRTSCTS)
2053 info->flags |= ASYNC_CTS_FLOW;
2054 else
2055 info->flags &= ~ASYNC_CTS_FLOW;
2056
2057 if (cflag & CLOCAL)
2058 info->flags &= ~ASYNC_CHECK_CD;
2059 else
2060 info->flags |= ASYNC_CHECK_CD;
2061
2062 /* process tty input control flags */
2063
2064 info->read_status_mask = RXSTATUS_OVERRUN;
2065 if (I_INPCK(info->tty))
2066 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2067 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2068 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2069
2070 if (I_IGNPAR(info->tty))
2071 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2072 if (I_IGNBRK(info->tty)) {
2073 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2074 /* If ignoring parity and break indicators, ignore
2075 * overruns too. (For real raw support).
2076 */
2077 if (I_IGNPAR(info->tty))
2078 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2079 }
2080
2081 mgsl_program_hw(info);
2082
2083 } /* end of mgsl_change_params() */
2084
2085 /* mgsl_put_char()
2086 *
2087 * Add a character to the transmit buffer.
2088 *
2089 * Arguments: tty pointer to tty information structure
2090 * ch character to add to transmit buffer
2091 *
2092 * Return Value: None
2093 */
2094 static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2095 {
2096 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2097 unsigned long flags;
2098
2099 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2100 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2101 __FILE__,__LINE__,ch,info->device_name);
2102 }
2103
2104 if (mgsl_paranoia_check(info, tty->device, "mgsl_put_char"))
2105 return;
2106
2107 if (!tty || !info->xmit_buf)
2108 return;
2109
2110 spin_lock_irqsave(&info->irq_spinlock,flags);
2111
2112 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2113
2114 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2115 info->xmit_buf[info->xmit_head++] = ch;
2116 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2117 info->xmit_cnt++;
2118 }
2119 }
2120
2121 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2122
2123 } /* end of mgsl_put_char() */
2124
2125 /* mgsl_flush_chars()
2126 *
2127 * Enable transmitter so remaining characters in the
2128 * transmit buffer are sent.
2129 *
2130 * Arguments: tty pointer to tty information structure
2131 * Return Value: None
2132 */
2133 static void mgsl_flush_chars(struct tty_struct *tty)
2134 {
2135 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2136 unsigned long flags;
2137
2138 if ( debug_level >= DEBUG_LEVEL_INFO )
2139 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2140 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2141
2142 if (mgsl_paranoia_check(info, tty->device, "mgsl_flush_chars"))
2143 return;
2144
2145 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2146 !info->xmit_buf)
2147 return;
2148
2149 if ( debug_level >= DEBUG_LEVEL_INFO )
2150 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2151 __FILE__,__LINE__,info->device_name );
2152
2153 spin_lock_irqsave(&info->irq_spinlock,flags);
2154
2155 if (!info->tx_active) {
2156 if ( (info->params.mode == MGSL_MODE_HDLC ||
2157 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2158 /* operating in synchronous (frame oriented) mode */
2159 /* copy data from circular xmit_buf to */
2160 /* transmit DMA buffer. */
2161 mgsl_load_tx_dma_buffer(info,
2162 info->xmit_buf,info->xmit_cnt);
2163 }
2164 usc_start_transmitter(info);
2165 }
2166
2167 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2168
2169 } /* end of mgsl_flush_chars() */
2170
2171 /* mgsl_write()
2172 *
2173 * Send a block of data
2174 *
2175 * Arguments:
2176 *
2177 * tty pointer to tty information structure
2178 * from_user flag: 1 = from user process
2179 * buf pointer to buffer containing send data
2180 * count size of send data in bytes
2181 *
2182 * Return Value: number of characters written
2183 */
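/* Usage sketch (user space, illustrative only; the device node name is
 * an assumption): in HDLC mode each write() must carry one complete
 * frame, so a caller typically does something like:
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);	  // hypothetical node name
 *	unsigned char frame[128];
 *	// ... build frame contents ...
 *	int n = write(fd, frame, sizeof(frame));  // one call == one frame
 *	if (n == 0)
 *		;	// transmitter busy, try again later
 */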
2184 static int mgsl_write(struct tty_struct * tty, int from_user,
2185 const unsigned char *buf, int count)
2186 {
2187 int c, ret = 0, err;
2188 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2189 unsigned long flags;
2190
2191 if ( debug_level >= DEBUG_LEVEL_INFO )
2192 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2193 __FILE__,__LINE__,info->device_name,count);
2194
2195 if (mgsl_paranoia_check(info, tty->device, "mgsl_write"))
2196 goto cleanup;
2197
2198 if (!tty || !info->xmit_buf || !tmp_buf)
2199 goto cleanup;
2200
2201 if ( info->params.mode == MGSL_MODE_HDLC ||
2202 info->params.mode == MGSL_MODE_RAW ) {
2203 /* operating in synchronous (frame oriented) mode */
2205 if (info->tx_active) {
2206
2207 if ( info->params.mode == MGSL_MODE_HDLC ) {
2208 ret = 0;
2209 goto cleanup;
2210 }
2211 /* transmitter is actively sending data -
2212 * if we have multiple transmit dma and
2213 * holding buffers, attempt to queue this
2214 * frame for transmission at a later time.
2215 */
2216 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2217 /* no tx holding buffers available */
2218 ret = 0;
2219 goto cleanup;
2220 }
2221
2222 /* queue transmit frame request */
2223 ret = count;
2224 if (from_user) {
2225 down(&tmp_buf_sem);
2226 COPY_FROM_USER(err,tmp_buf, buf, count);
2227 if (err) {
2228 if ( debug_level >= DEBUG_LEVEL_INFO )
2229 printk( "%s(%d):mgsl_write(%s) sync user buf copy failed\n",
2230 __FILE__,__LINE__,info->device_name);
2231 ret = -EFAULT;
2232 } else
2233 save_tx_buffer_request(info,tmp_buf,count);
2234 up(&tmp_buf_sem);
2235 }
2236 else
2237 save_tx_buffer_request(info,buf,count);
2238
2239 /* if we have sufficient tx dma buffers,
2240 * load the next buffered tx request
2241 */
2242 spin_lock_irqsave(&info->irq_spinlock,flags);
2243 load_next_tx_holding_buffer(info);
2244 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2245 goto cleanup;
2246 }
2247
2248 /* if operating in HDLC LoopMode and the adapter */
2249 /* has yet to be inserted into the loop, we can't */
2250 /* transmit */
2251
2252 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2253 !usc_loopmode_active(info) )
2254 {
2255 ret = 0;
2256 goto cleanup;
2257 }
2258
2259 if ( info->xmit_cnt ) {
2260 /* Send data accumulated from send_char() calls */
2261 /* as a frame and wait before accepting more data. */
2262 ret = 0;
2263
2264 /* copy data from circular xmit_buf to */
2265 /* transmit DMA buffer. */
2266 mgsl_load_tx_dma_buffer(info,
2267 info->xmit_buf,info->xmit_cnt);
2268 if ( debug_level >= DEBUG_LEVEL_INFO )
2269 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2270 __FILE__,__LINE__,info->device_name);
2271 } else {
2272 if ( debug_level >= DEBUG_LEVEL_INFO )
2273 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2274 __FILE__,__LINE__,info->device_name);
2275 ret = count;
2276 info->xmit_cnt = count;
2277 if (from_user) {
2278 down(&tmp_buf_sem);
2279 COPY_FROM_USER(err,tmp_buf, buf, count);
2280 if (err) {
2281 if ( debug_level >= DEBUG_LEVEL_INFO )
2282 printk( "%s(%d):mgsl_write(%s) sync user buf copy failed\n",
2283 __FILE__,__LINE__,info->device_name);
2284 ret = -EFAULT;
2285 } else
2286 mgsl_load_tx_dma_buffer(info,tmp_buf,count);
2287 up(&tmp_buf_sem);
2288 }
2289 else
2290 mgsl_load_tx_dma_buffer(info,buf,count);
2291 }
2292 } else {
2293 if (from_user) {
2294 down(&tmp_buf_sem);
2295 while (1) {
2296 c = MIN(count,
2297 MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2298 SERIAL_XMIT_SIZE - info->xmit_head));
2299 if (c <= 0)
2300 break;
2301
2302 COPY_FROM_USER(err,tmp_buf, buf, c);
2303 c -= err;
2304 if (!c) {
2305 if (!ret)
2306 ret = -EFAULT;
2307 break;
2308 }
2309 spin_lock_irqsave(&info->irq_spinlock,flags);
2310 c = MIN(c, MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2311 SERIAL_XMIT_SIZE - info->xmit_head));
2312 memcpy(info->xmit_buf + info->xmit_head, tmp_buf, c);
2313 info->xmit_head = ((info->xmit_head + c) &
2314 (SERIAL_XMIT_SIZE-1));
2315 info->xmit_cnt += c;
2316 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2317 buf += c;
2318 count -= c;
2319 ret += c;
2320 }
2321 up(&tmp_buf_sem);
2322 } else {
2323 while (1) {
2324 spin_lock_irqsave(&info->irq_spinlock,flags);
2325 c = MIN(count,
2326 MIN(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2327 SERIAL_XMIT_SIZE - info->xmit_head));
2328 if (c <= 0) {
2329 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2330 break;
2331 }
2332 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2333 info->xmit_head = ((info->xmit_head + c) &
2334 (SERIAL_XMIT_SIZE-1));
2335 info->xmit_cnt += c;
2336 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2337 buf += c;
2338 count -= c;
2339 ret += c;
2340 }
2341 }
2342 }
2343
2344 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2345 spin_lock_irqsave(&info->irq_spinlock,flags);
2346 if (!info->tx_active)
2347 usc_start_transmitter(info);
2348 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2349 }
2350 cleanup:
2351 if ( debug_level >= DEBUG_LEVEL_INFO )
2352 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2353 __FILE__,__LINE__,info->device_name,ret);
2354
2355 return ret;
2356
2357 } /* end of mgsl_write() */
2358
2359 /* mgsl_write_room()
2360 *
2361 * Return the count of free bytes in transmit buffer
2362 *
2363 * Arguments: tty pointer to tty info structure
2364 * Return Value: number of free bytes in the transmit buffer
2365 */
2366 static int mgsl_write_room(struct tty_struct *tty)
2367 {
2368 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2369 int ret;
2370
2371 if (mgsl_paranoia_check(info, tty->device, "mgsl_write_room"))
2372 return 0;
2373 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2374 if (ret < 0)
2375 ret = 0;
2376
2377 if (debug_level >= DEBUG_LEVEL_INFO)
2378 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2379 __FILE__,__LINE__, info->device_name,ret );
2380
2381 if ( info->params.mode == MGSL_MODE_HDLC ||
2382 info->params.mode == MGSL_MODE_RAW ) {
2383 /* operating in synchronous (frame oriented) mode */
2384 if ( info->tx_active )
2385 return 0;
2386 else
2387 return HDLC_MAX_FRAME_SIZE;
2388 }
2389
2390 return ret;
2391
2392 } /* end of mgsl_write_room() */
2393
2394 /* mgsl_chars_in_buffer()
2395 *
2396 * Return the count of bytes in transmit buffer
2397 *
2398 * Arguments: tty pointer to tty info structure
2399 * Return Value: number of bytes remaining to be transmitted
2400 */
2401 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2402 {
2403 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2404
2405 if (debug_level >= DEBUG_LEVEL_INFO)
2406 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2407 __FILE__,__LINE__, info->device_name );
2408
2409 if (mgsl_paranoia_check(info, tty->device, "mgsl_chars_in_buffer"))
2410 return 0;
2411
2412 if (debug_level >= DEBUG_LEVEL_INFO)
2413 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2414 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2415
2416 if ( info->params.mode == MGSL_MODE_HDLC ||
2417 info->params.mode == MGSL_MODE_RAW ) {
2418 /* operating in synchronous (frame oriented) mode */
2419 if ( info->tx_active )
2420 return info->max_frame_size;
2421 else
2422 return 0;
2423 }
2424
2425 return info->xmit_cnt;
2426 } /* end of mgsl_chars_in_buffer() */
2427
2428 /* mgsl_flush_buffer()
2429 *
2430 * Discard all data in the send buffer
2431 *
2432 * Arguments: tty pointer to tty info structure
2433 * Return Value: None
2434 */
2435 static void mgsl_flush_buffer(struct tty_struct *tty)
2436 {
2437 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2438 unsigned long flags;
2439
2440 if (debug_level >= DEBUG_LEVEL_INFO)
2441 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2442 __FILE__,__LINE__, info->device_name );
2443
2444 if (mgsl_paranoia_check(info, tty->device, "mgsl_flush_buffer"))
2445 return;
2446
2447 spin_lock_irqsave(&info->irq_spinlock,flags);
2448 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2449 del_timer(&info->tx_timer);
2450 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2451
2452 tty_wakeup(tty);
2453 }
2454
2455 /* mgsl_send_xchar()
2456 *
2457 * Send a high-priority XON/XOFF character
2458 *
2459 * Arguments: tty pointer to tty info structure
2460 * ch character to send
2461 * Return Value: None
2462 */
2463 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2464 {
2465 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2466 unsigned long flags;
2467
2468 if (debug_level >= DEBUG_LEVEL_INFO)
2469 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2470 __FILE__,__LINE__, info->device_name, ch );
2471
2472 if (mgsl_paranoia_check(info, tty->device, "mgsl_send_xchar"))
2473 return;
2474
2475 info->x_char = ch;
2476 if (ch) {
2477 /* Make sure transmit interrupts are on */
2478 spin_lock_irqsave(&info->irq_spinlock,flags);
2479 if (!info->tx_enabled)
2480 usc_start_transmitter(info);
2481 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2482 }
2483 } /* end of mgsl_send_xchar() */
2484
2485 /* mgsl_throttle()
2486 *
2487 * Signal remote device to throttle send data (our receive data)
2488 *
2489 * Arguments: tty pointer to tty info structure
2490 * Return Value: None
2491 */
2492 static void mgsl_throttle(struct tty_struct * tty)
2493 {
2494 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2495 unsigned long flags;
2496
2497 if (debug_level >= DEBUG_LEVEL_INFO)
2498 printk("%s(%d):mgsl_throttle(%s) entry\n",
2499 __FILE__,__LINE__, info->device_name );
2500
2501 if (mgsl_paranoia_check(info, tty->device, "mgsl_throttle"))
2502 return;
2503
2504 if (I_IXOFF(tty))
2505 mgsl_send_xchar(tty, STOP_CHAR(tty));
2506
2507 if (tty->termios->c_cflag & CRTSCTS) {
2508 spin_lock_irqsave(&info->irq_spinlock,flags);
2509 info->serial_signals &= ~SerialSignal_RTS;
2510 usc_set_serial_signals(info);
2511 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2512 }
2513 } /* end of mgsl_throttle() */
2514
2515 /* mgsl_unthrottle()
2516 *
2517 * Signal remote device to stop throttling send data (our receive data)
2518 *
2519 * Arguments: tty pointer to tty info structure
2520 * Return Value: None
2521 */
2522 static void mgsl_unthrottle(struct tty_struct * tty)
2523 {
2524 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2525 unsigned long flags;
2526
2527 if (debug_level >= DEBUG_LEVEL_INFO)
2528 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2529 __FILE__,__LINE__, info->device_name );
2530
2531 if (mgsl_paranoia_check(info, tty->device, "mgsl_unthrottle"))
2532 return;
2533
2534 if (I_IXOFF(tty)) {
2535 if (info->x_char)
2536 info->x_char = 0;
2537 else
2538 mgsl_send_xchar(tty, START_CHAR(tty));
2539 }
2540
2541 if (tty->termios->c_cflag & CRTSCTS) {
2542 spin_lock_irqsave(&info->irq_spinlock,flags);
2543 info->serial_signals |= SerialSignal_RTS;
2544 usc_set_serial_signals(info);
2545 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2546 }
2547
2548 } /* end of mgsl_unthrottle() */
2549
2550 /* mgsl_get_stats()
2551 *
2552 * get the current serial statistics (error and signal counters)
2553 *
2554 * Arguments: info pointer to device instance data
2555 * user_icount pointer to buffer to hold returned stats
2556 *
2557 * Return Value: 0 if success, otherwise error code
2558 */
2559 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount *user_icount)
2560 {
2561 int err;
2562
2563 if (debug_level >= DEBUG_LEVEL_INFO)
2564 printk("%s(%d):mgsl_get_stats(%s)\n",
2565 __FILE__,__LINE__, info->device_name);
2566
2567 COPY_TO_USER(err,user_icount, &info->icount, sizeof(struct mgsl_icount));
2568 if (err) {
2569 if ( debug_level >= DEBUG_LEVEL_INFO )
2570 printk( "%s(%d):mgsl_get_stats(%s) user buffer copy failed\n",
2571 __FILE__,__LINE__,info->device_name);
2572 return -EFAULT;
2573 }
2574
2575 return 0;
2576
2577 } /* end of mgsl_get_stats() */
2578
2579 /* mgsl_get_params()
2580 *
2581 * get the current serial parameters information
2582 *
2583 * Arguments: info pointer to device instance data
2584 * user_params pointer to buffer to hold returned params
2585 *
2586 * Return Value: 0 if success, otherwise error code
2587 */
2588 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS *user_params)
2589 {
2590 int err;
2591 if (debug_level >= DEBUG_LEVEL_INFO)
2592 printk("%s(%d):mgsl_get_params(%s)\n",
2593 __FILE__,__LINE__, info->device_name);
2594
2595 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2596 if (err) {
2597 if ( debug_level >= DEBUG_LEVEL_INFO )
2598 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2599 __FILE__,__LINE__,info->device_name);
2600 return -EFAULT;
2601 }
2602
2603 return 0;
2604
2605 } /* end of mgsl_get_params() */
2606
2607 /* mgsl_set_params()
2608 *
2609 * set the serial parameters
2610 *
2611 * Arguments:
2612 *
2613 * info pointer to device instance data
2614 * new_params user buffer containing new serial params
2615 *
2616 * Return Value: 0 if success, otherwise error code
2617 */
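/* Usage sketch (user space, illustrative only): the structure members
 * shown are those referenced by this driver; the values are examples,
 * not recommendations.
 *
 *	MGSL_PARAMS params;
 *
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	// read current settings
 *	params.mode = MGSL_MODE_HDLC;
 *	params.data_rate = 64000;
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	// apply new settings
 */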
2618 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS *new_params)
2619 {
2620 unsigned long flags;
2621 MGSL_PARAMS tmp_params;
2622 int err;
2623
2624 if (debug_level >= DEBUG_LEVEL_INFO)
2625 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2626 info->device_name );
2627 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2628 if (err) {
2629 if ( debug_level >= DEBUG_LEVEL_INFO )
2630 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2631 __FILE__,__LINE__,info->device_name);
2632 return -EFAULT;
2633 }
2634
2635 spin_lock_irqsave(&info->irq_spinlock,flags);
2636 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2637 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2638
2639 mgsl_change_params(info);
2640
2641 return 0;
2642
2643 } /* end of mgsl_set_params() */
2644
2645 /* mgsl_get_txidle()
2646 *
2647 * get the current transmit idle mode
2648 *
2649 * Arguments: info pointer to device instance data
2650 * idle_mode pointer to buffer to hold returned idle mode
2651 *
2652 * Return Value: 0 if success, otherwise error code
2653 */
2654 static int mgsl_get_txidle(struct mgsl_struct * info, int *idle_mode)
2655 {
2656 int err;
2657
2658 if (debug_level >= DEBUG_LEVEL_INFO)
2659 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2660 __FILE__,__LINE__, info->device_name, info->idle_mode);
2661
2662 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2663 if (err) {
2664 if ( debug_level >= DEBUG_LEVEL_INFO )
2665 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2666 __FILE__,__LINE__,info->device_name);
2667 return -EFAULT;
2668 }
2669
2670 return 0;
2671
2672 } /* end of mgsl_get_txidle() */
2673
2674 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2675 *
2676 * Arguments: info pointer to device instance data
2677 * idle_mode new idle mode
2678 *
2679 * Return Value: 0 if success, otherwise error code
2680 */
2681 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2682 {
2683 unsigned long flags;
2684
2685 if (debug_level >= DEBUG_LEVEL_INFO)
2686 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2687 info->device_name, idle_mode );
2688
2689 spin_lock_irqsave(&info->irq_spinlock,flags);
2690 info->idle_mode = idle_mode;
2691 usc_set_txidle( info );
2692 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2693 return 0;
2694
2695 } /* end of mgsl_set_txidle() */
2696
2697 /* mgsl_txenable()
2698 *
2699 * enable or disable the transmitter
2700 *
2701 * Arguments:
2702 *
2703 * info pointer to device instance data
2704 * enable 1 = enable, 0 = disable
2705 *
2706 * Return Value: 0 if success, otherwise error code
2707 */
2708 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2709 {
2710 unsigned long flags;
2711
2712 if (debug_level >= DEBUG_LEVEL_INFO)
2713 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2714 info->device_name, enable);
2715
2716 spin_lock_irqsave(&info->irq_spinlock,flags);
2717 if ( enable ) {
2718 if ( !info->tx_enabled ) {
2719
2720 usc_start_transmitter(info);
2721 /*--------------------------------------------------
2722 * if HDLC/SDLC Loop mode, attempt to insert the
2723 * station in the 'loop' by setting CMR:13. Upon
2724 * receipt of the next GoAhead (RxAbort) sequence,
2725 * the OnLoop indicator (CCSR:7) should go active
2726 * to indicate that we are on the loop
2727 *--------------------------------------------------*/
2728 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2729 usc_loopmode_insert_request( info );
2730 }
2731 } else {
2732 if ( info->tx_enabled )
2733 usc_stop_transmitter(info);
2734 }
2735 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2736 return 0;
2737
2738 } /* end of mgsl_txenable() */
2739
2740 /* mgsl_txabort() abort transmission of the current HDLC frame
2741 *
2742 * Arguments: info pointer to device instance data
2743 * Return Value: 0 if success, otherwise error code
2744 */
2745 static int mgsl_txabort(struct mgsl_struct * info)
2746 {
2747 unsigned long flags;
2748
2749 if (debug_level >= DEBUG_LEVEL_INFO)
2750 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2751 info->device_name);
2752
2753 spin_lock_irqsave(&info->irq_spinlock,flags);
2754 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2755 {
2756 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2757 usc_loopmode_cancel_transmit( info );
2758 else
2759 usc_TCmd(info,TCmd_SendAbort);
2760 }
2761 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2762 return 0;
2763
2764 } /* end of mgsl_txabort() */
2765
2766 /* mgsl_rxenable() enable or disable the receiver
2767 *
2768 * Arguments: info pointer to device instance data
2769 * enable 1 = enable, 0 = disable
2770 * Return Value: 0 if success, otherwise error code
2771 */
2772 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2773 {
2774 unsigned long flags;
2775
2776 if (debug_level >= DEBUG_LEVEL_INFO)
2777 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2778 info->device_name, enable);
2779
2780 spin_lock_irqsave(&info->irq_spinlock,flags);
2781 if ( enable ) {
2782 if ( !info->rx_enabled )
2783 usc_start_receiver(info);
2784 } else {
2785 if ( info->rx_enabled )
2786 usc_stop_receiver(info);
2787 }
2788 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2789 return 0;
2790
2791 } /* end of mgsl_rxenable() */
2792
2793 /* mgsl_wait_event() wait for specified event to occur
2794 *
2795 * Arguments: info pointer to device instance data
2796 * mask pointer to bitmask of events to wait for
2797 * Return Value: 0 if successful, with the bit mask updated to
2798 * reflect the events that triggered,
2799 * otherwise error code
2800 */
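/* Usage sketch (user space, illustrative only): wait for DCD to go
 * active or inactive; on return the mask identifies the event(s) that
 * actually occurred.
 *
 *	int mask = MgslEvent_DcdActive + MgslEvent_DcdInactive;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &mask) == 0)
 *		;	// mask now holds the triggering event(s)
 */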
2801 static int mgsl_wait_event(struct mgsl_struct * info, int * mask_ptr)
2802 {
2803 unsigned long flags;
2804 int s;
2805 int rc=0;
2806 struct mgsl_icount cprev, cnow;
2807 int events;
2808 int mask;
2809 struct _input_signal_events oldsigs, newsigs;
2810 DECLARE_WAITQUEUE(wait, current);
2811
2812 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2813 if (rc) {
2814 return -EFAULT;
2815 }
2816
2817 if (debug_level >= DEBUG_LEVEL_INFO)
2818 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2819 info->device_name, mask);
2820
2821 spin_lock_irqsave(&info->irq_spinlock,flags);
2822
2823 /* return immediately if state matches requested events */
2824 usc_get_serial_signals(info);
2825 s = info->serial_signals;
2826 events = mask &
2827 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2828 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2829 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2830 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2831 if (events) {
2832 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2833 goto exit;
2834 }
2835
2836 /* save current irq counts */
2837 cprev = info->icount;
2838 oldsigs = info->input_signal_events;
2839
2840 /* enable hunt and idle irqs if needed */
2841 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2842 u16 oldreg = usc_InReg(info,RICR);
2843 u16 newreg = oldreg +
2844 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2845 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2846 if (oldreg != newreg)
2847 usc_OutReg(info, RICR, newreg);
2848 }
2849
2850 set_current_state(TASK_INTERRUPTIBLE);
2851 add_wait_queue(&info->event_wait_q, &wait);
2852
2853 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2854
2855
2856 for(;;) {
2857 schedule();
2858 if (signal_pending(current)) {
2859 rc = -ERESTARTSYS;
2860 break;
2861 }
2862
2863 /* get current irq counts */
2864 spin_lock_irqsave(&info->irq_spinlock,flags);
2865 cnow = info->icount;
2866 newsigs = info->input_signal_events;
2867 set_current_state(TASK_INTERRUPTIBLE);
2868 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2869
2870 /* if no change, wait aborted for some reason */
2871 if (newsigs.dsr_up == oldsigs.dsr_up &&
2872 newsigs.dsr_down == oldsigs.dsr_down &&
2873 newsigs.dcd_up == oldsigs.dcd_up &&
2874 newsigs.dcd_down == oldsigs.dcd_down &&
2875 newsigs.cts_up == oldsigs.cts_up &&
2876 newsigs.cts_down == oldsigs.cts_down &&
2877 newsigs.ri_up == oldsigs.ri_up &&
2878 newsigs.ri_down == oldsigs.ri_down &&
2879 cnow.exithunt == cprev.exithunt &&
2880 cnow.rxidle == cprev.rxidle) {
2881 rc = -EIO;
2882 break;
2883 }
2884
2885 events = mask &
2886 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2887 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2888 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2889 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2890 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2891 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2892 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2893 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2894 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2895 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2896 if (events)
2897 break;
2898
2899 cprev = cnow;
2900 oldsigs = newsigs;
2901 }
2902
2903 remove_wait_queue(&info->event_wait_q, &wait);
2904 set_current_state(TASK_RUNNING);
2905
2906 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2907 spin_lock_irqsave(&info->irq_spinlock,flags);
2908 if (!waitqueue_active(&info->event_wait_q)) {
2909 /* disable exit hunt mode/idle rcvd IRQs */
2910 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2911 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2912 }
2913 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2914 }
2915 exit:
2916 if ( rc == 0 )
2917 PUT_USER(rc, events, mask_ptr);
2918
2919 return rc;
2920
2921 } /* end of mgsl_wait_event() */
2922
2923 static int modem_input_wait(struct mgsl_struct *info,int arg)
2924 {
2925 unsigned long flags;
2926 int rc;
2927 struct mgsl_icount cprev, cnow;
2928 DECLARE_WAITQUEUE(wait, current);
2929
2930 /* save current irq counts */
2931 spin_lock_irqsave(&info->irq_spinlock,flags);
2932 cprev = info->icount;
2933 add_wait_queue(&info->status_event_wait_q, &wait);
2934 set_current_state(TASK_INTERRUPTIBLE);
2935 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2936
2937 for(;;) {
2938 schedule();
2939 if (signal_pending(current)) {
2940 rc = -ERESTARTSYS;
2941 break;
2942 }
2943
2944 /* get new irq counts */
2945 spin_lock_irqsave(&info->irq_spinlock,flags);
2946 cnow = info->icount;
2947 set_current_state(TASK_INTERRUPTIBLE);
2948 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2949
2950 /* if no change, wait aborted for some reason */
2951 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2952 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2953 rc = -EIO;
2954 break;
2955 }
2956
2957 /* check for change in caller specified modem input */
2958 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2959 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2960 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2961 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2962 rc = 0;
2963 break;
2964 }
2965
2966 cprev = cnow;
2967 }
2968 remove_wait_queue(&info->status_event_wait_q, &wait);
2969 set_current_state(TASK_RUNNING);
2970 return rc;
2971 }
2972
2973 /* get_modem_info()
2974 *
2975 * Read the state of the serial control and
2976 * status signals and return to caller.
2977 *
2978 * Arguments: info pointer to device instance data
2979 * value pointer to int to hold returned info
2980 *
2981 * Return Value: 0 if success, otherwise error code
2982 */
2983 static int get_modem_info(struct mgsl_struct * info, unsigned int *value)
2984 {
2985 unsigned int result = 0;
2986 unsigned long flags;
2987 int err;
2988
2989 spin_lock_irqsave(&info->irq_spinlock,flags);
2990 usc_get_serial_signals(info);
2991 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2992
2993 if (info->serial_signals & SerialSignal_RTS)
2994 result |= TIOCM_RTS;
2995 if (info->serial_signals & SerialSignal_DTR)
2996 result |= TIOCM_DTR;
2997 if (info->serial_signals & SerialSignal_DCD)
2998 result |= TIOCM_CAR;
2999 if (info->serial_signals & SerialSignal_RI)
3000 result |= TIOCM_RNG;
3001 if (info->serial_signals & SerialSignal_DSR)
3002 result |= TIOCM_DSR;
3003 if (info->serial_signals & SerialSignal_CTS)
3004 result |= TIOCM_CTS;
3005
3006 if (debug_level >= DEBUG_LEVEL_INFO)
3007 printk("%s(%d):mgsl_get_modem_info %s value=%08X\n",
3008 __FILE__,__LINE__, info->device_name, result );
3009
3010 PUT_USER(err,result,value);
3011 return err;
3012 } /* end of get_modem_info() */
3013
3014 /* set_modem_info()
3015 *
3016 * Set the state of the modem control signals (DTR/RTS)
3017 *
3018 * Arguments:
3019 *
3020 * info pointer to device instance data
3021 * cmd signal command: TIOCMBIS = set bit, TIOCMBIC = clear bit,
3022 * TIOCMSET = set/clear signal values
3023 * value bit mask for command
3024 *
3025 * Return Value: 0 if success, otherwise error code
3026 */
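/* Usage sketch (user space, illustrative only): raise DTR and RTS,
 * then drop RTS again, using the standard termios modem ioctls.
 *
 *	int bits = TIOCM_DTR | TIOCM_RTS;
 *	ioctl(fd, TIOCMBIS, &bits);	// set DTR and RTS
 *
 *	bits = TIOCM_RTS;
 *	ioctl(fd, TIOCMBIC, &bits);	// clear RTS only
 */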
3027 static int set_modem_info(struct mgsl_struct * info, unsigned int cmd,
3028 unsigned int *value)
3029 {
3030 int error;
3031 unsigned int arg;
3032 unsigned long flags;
3033
3034 if (debug_level >= DEBUG_LEVEL_INFO)
3035 printk("%s(%d):mgsl_set_modem_info %s\n", __FILE__,__LINE__,
3036 info->device_name );
3037
3038 GET_USER(error,arg,value);
3039 if (error)
3040 return error;
3041
3042 switch (cmd) {
3043 case TIOCMBIS:
3044 if (arg & TIOCM_RTS)
3045 info->serial_signals |= SerialSignal_RTS;
3046 if (arg & TIOCM_DTR)
3047 info->serial_signals |= SerialSignal_DTR;
3048 break;
3049 case TIOCMBIC:
3050 if (arg & TIOCM_RTS)
3051 info->serial_signals &= ~SerialSignal_RTS;
3052 if (arg & TIOCM_DTR)
3053 info->serial_signals &= ~SerialSignal_DTR;
3054 break;
3055 case TIOCMSET:
3056 if (arg & TIOCM_RTS)
3057 info->serial_signals |= SerialSignal_RTS;
3058 else
3059 info->serial_signals &= ~SerialSignal_RTS;
3060
3061 if (arg & TIOCM_DTR)
3062 info->serial_signals |= SerialSignal_DTR;
3063 else
3064 info->serial_signals &= ~SerialSignal_DTR;
3065 break;
3066 default:
3067 return -EINVAL;
3068 }
3069
3070 spin_lock_irqsave(&info->irq_spinlock,flags);
3071 usc_set_serial_signals(info);
3072 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3073
3074 return 0;
3075
3076 } /* end of set_modem_info() */
3077
3078 /* mgsl_break() Set or clear transmit break condition
3079 *
3080 * Arguments: tty pointer to tty instance data
3081 * break_state -1=set break condition, 0=clear
3082 * Return Value: None
3083 */
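/* Usage sketch (user space, illustrative only): the tty layer invokes
 * this entry point when an application requests a break, e.g.:
 *
 *	tcsendbreak(fd, 0);	// assert break for a short interval
 */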
3084 static void mgsl_break(struct tty_struct *tty, int break_state)
3085 {
3086 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3087 unsigned long flags;
3088
3089 if (debug_level >= DEBUG_LEVEL_INFO)
3090 printk("%s(%d):mgsl_break(%s,%d)\n",
3091 __FILE__,__LINE__, info->device_name, break_state);
3092
3093 if (mgsl_paranoia_check(info, tty->device, "mgsl_break"))
3094 return;
3095
3096 spin_lock_irqsave(&info->irq_spinlock,flags);
3097 if (break_state == -1)
3098 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
3099 else
3100 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
3101 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3102
3103 } /* end of mgsl_break() */
3104
3105 /* mgsl_ioctl() Service an IOCTL request
3106 *
3107 * Arguments:
3108 *
3109 * tty pointer to tty instance data
3110 * file pointer to associated file object for device
3111 * cmd IOCTL command code
3112 * arg command argument/context
3113 *
3114 * Return Value: 0 if success, otherwise error code
3115 */
3116 static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
3117 unsigned int cmd, unsigned long arg)
3118 {
3119 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3120
3121 if (debug_level >= DEBUG_LEVEL_INFO)
3122 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
3123 info->device_name, cmd );
3124
3125 if (mgsl_paranoia_check(info, tty->device, "mgsl_ioctl"))
3126 return -ENODEV;
3127
3128 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
3129 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
3130 if (tty->flags & (1 << TTY_IO_ERROR))
3131 return -EIO;
3132 }
3133
3134 return mgsl_ioctl_common(info, cmd, arg);
3135 }
3136
3137 int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
3138 {
3139 int error;
3140 struct mgsl_icount cnow; /* kernel counter temps */
3141 struct serial_icounter_struct *p_cuser; /* user space */
3142 unsigned long flags;
3143
3144 switch (cmd) {
3145 case TIOCMGET:
3146 return get_modem_info(info, (unsigned int *) arg);
3147 case TIOCMBIS:
3148 case TIOCMBIC:
3149 case TIOCMSET:
3150 return set_modem_info(info, cmd, (unsigned int *) arg);
3151 case MGSL_IOCGPARAMS:
3152 return mgsl_get_params(info,(MGSL_PARAMS *)arg);
3153 case MGSL_IOCSPARAMS:
3154 return mgsl_set_params(info,(MGSL_PARAMS *)arg);
3155 case MGSL_IOCGTXIDLE:
3156 return mgsl_get_txidle(info,(int*)arg);
3157 case MGSL_IOCSTXIDLE:
3158 return mgsl_set_txidle(info,(int)arg);
3159 case MGSL_IOCTXENABLE:
3160 return mgsl_txenable(info,(int)arg);
3161 case MGSL_IOCRXENABLE:
3162 return mgsl_rxenable(info,(int)arg);
3163 case MGSL_IOCTXABORT:
3164 return mgsl_txabort(info);
3165 case MGSL_IOCGSTATS:
3166 return mgsl_get_stats(info,(struct mgsl_icount*)arg);
3167 case MGSL_IOCWAITEVENT:
3168 return mgsl_wait_event(info,(int*)arg);
3169 case MGSL_IOCLOOPTXDONE:
3170 return mgsl_loopmode_send_done(info);
3171 case MGSL_IOCCLRMODCOUNT:
3172 while(MOD_IN_USE)
3173 MOD_DEC_USE_COUNT;
3174 return 0;
3175
3176 /* Wait for modem input (DCD,RI,DSR,CTS) change
3177 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3178 */
3179 case TIOCMIWAIT:
3180 return modem_input_wait(info,(int)arg);
3181
3182 /*
3183 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3184 * Return: write counters to the user passed counter struct
3185 * NB: both 1->0 and 0->1 transitions are counted except for
3186 * RI where only 0->1 is counted.
3187 */
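	/* Usage sketch (user space, illustrative only):
	 *
	 *	struct serial_icounter_struct icount;
	 *
	 *	if (ioctl(fd, TIOCGICOUNT, &icount) == 0)
	 *		printf("dcd=%d cts=%d\n", icount.dcd, icount.cts);
	 */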
3188 case TIOCGICOUNT:
3189 spin_lock_irqsave(&info->irq_spinlock,flags);
3190 cnow = info->icount;
3191 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3192 p_cuser = (struct serial_icounter_struct *) arg;
3193 PUT_USER(error,cnow.cts, &p_cuser->cts);
3194 if (error) return error;
3195 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3196 if (error) return error;
3197 PUT_USER(error,cnow.rng, &p_cuser->rng);
3198 if (error) return error;
3199 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3200 if (error) return error;
3201 PUT_USER(error,cnow.rx, &p_cuser->rx);
3202 if (error) return error;
3203 PUT_USER(error,cnow.tx, &p_cuser->tx);
3204 if (error) return error;
3205 PUT_USER(error,cnow.frame, &p_cuser->frame);
3206 if (error) return error;
3207 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3208 if (error) return error;
3209 PUT_USER(error,cnow.parity, &p_cuser->parity);
3210 if (error) return error;
3211 PUT_USER(error,cnow.brk, &p_cuser->brk);
3212 if (error) return error;
3213 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3214 if (error) return error;
3215 return 0;
3216 default:
3217 return -ENOIOCTLCMD;
3218 }
3219 return 0;
3220 }
3221
3222 /* mgsl_set_termios()
3223 *
3224 * Set new termios settings
3225 *
3226 * Arguments:
3227 *
3228 * tty pointer to tty structure
3229 * old_termios pointer to the previous termios settings
3230 *
3231 * Return Value: None
3232 */
3233 static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3234 {
3235 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3236 unsigned long flags;
3237
3238 if (debug_level >= DEBUG_LEVEL_INFO)
3239 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3240 tty->driver.name );
3241
3242 /* just return if nothing has changed */
3243 if ((tty->termios->c_cflag == old_termios->c_cflag)
3244 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3245 == RELEVANT_IFLAG(old_termios->c_iflag)))
3246 return;
3247
3248 mgsl_change_params(info);
3249
3250 /* Handle transition to B0 status */
3251 if (old_termios->c_cflag & CBAUD &&
3252 !(tty->termios->c_cflag & CBAUD)) {
3253 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3254 spin_lock_irqsave(&info->irq_spinlock,flags);
3255 usc_set_serial_signals(info);
3256 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3257 }
3258
3259 /* Handle transition away from B0 status */
3260 if (!(old_termios->c_cflag & CBAUD) &&
3261 tty->termios->c_cflag & CBAUD) {
3262 info->serial_signals |= SerialSignal_DTR;
3263 if (!(tty->termios->c_cflag & CRTSCTS) ||
3264 !test_bit(TTY_THROTTLED, &tty->flags)) {
3265 info->serial_signals |= SerialSignal_RTS;
3266 }
3267 spin_lock_irqsave(&info->irq_spinlock,flags);
3268 usc_set_serial_signals(info);
3269 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3270 }
3271
3272 /* Handle turning off CRTSCTS */
3273 if (old_termios->c_cflag & CRTSCTS &&
3274 !(tty->termios->c_cflag & CRTSCTS)) {
3275 tty->hw_stopped = 0;
3276 mgsl_start(tty);
3277 }
3278
3279 } /* end of mgsl_set_termios() */
3280
3281 /* mgsl_close()
3282 *
3283 * Called when port is closed. Wait for remaining data to be
3284 * sent. Disable port and free resources.
3285 *
3286 * Arguments:
3287 *
3288 * tty pointer to open tty structure
3289 * filp pointer to open file object
3290 *
3291 * Return Value: None
3292 */
3293 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3294 {
3295 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3296
3297 if (mgsl_paranoia_check(info, tty->device, "mgsl_close"))
3298 return;
3299
3300 if (debug_level >= DEBUG_LEVEL_INFO)
3301 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3302 __FILE__,__LINE__, info->device_name, info->count);
3303
3304 if (!info->count)
3305 return;
3306
3307 if (tty_hung_up_p(filp))
3308 goto cleanup;
3309
3310 if ((tty->count == 1) && (info->count != 1)) {
3311 /*
3312 * tty->count is 1 and the tty structure will be freed.
3313 * info->count should be one in this case.
3314 * if it's not, correct it so that the port is shutdown.
3315 */
3316 printk("mgsl_close: bad refcount; tty->count is 1, "
3317 "info->count is %d\n", info->count);
3318 info->count = 1;
3319 }
3320
3321 info->count--;
3322
3323 /* if at least one open remaining, leave hardware active */
3324 if (info->count)
3325 goto cleanup;
3326
3327 info->flags |= ASYNC_CLOSING;
3328
3329 /* Save the termios structure, since this port may have
3330 * separate termios for callout and dialin.
3331 */
3332 if (info->flags & ASYNC_NORMAL_ACTIVE)
3333 info->normal_termios = *tty->termios;
3334 if (info->flags & ASYNC_CALLOUT_ACTIVE)
3335 info->callout_termios = *tty->termios;
3336
3337 /* set tty->closing to notify line discipline to
3338 * only process XON/XOFF characters. Only the N_TTY
3339 * discipline appears to use this (ppp does not).
3340 */
3341 tty->closing = 1;
3342
3343 /* wait for transmit data to clear all layers */
3344
3345 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3346 if (debug_level >= DEBUG_LEVEL_INFO)
3347 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3348 __FILE__,__LINE__, info->device_name );
3349 tty_wait_until_sent(tty, info->closing_wait);
3350 }
3351
3352 if (info->flags & ASYNC_INITIALIZED)
3353 mgsl_wait_until_sent(tty, info->timeout);
3354
3355 if (tty->driver.flush_buffer)
3356 tty->driver.flush_buffer(tty);
3357
3358 tty_ldisc_flush(tty);
3359
3360 shutdown(info);
3361
3362 tty->closing = 0;
3363 info->tty = 0;
3364
3365 if (info->blocked_open) {
3366 if (info->close_delay) {
3367 set_current_state(TASK_INTERRUPTIBLE);
3368 schedule_timeout(info->close_delay);
3369 }
3370 wake_up_interruptible(&info->open_wait);
3371 }
3372
3373 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE|
3374 ASYNC_CLOSING);
3375
3376 wake_up_interruptible(&info->close_wait);
3377
3378 cleanup:
3379 if (debug_level >= DEBUG_LEVEL_INFO)
3380 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3381 tty->driver.name, info->count);
3382 if(MOD_IN_USE)
3383 MOD_DEC_USE_COUNT;
3384
3385 } /* end of mgsl_close() */
3386
3387 /* mgsl_wait_until_sent()
3388 *
3389 * Wait until the transmitter is empty.
3390 *
3391 * Arguments:
3392 *
3393 * tty pointer to tty info structure
3394 * timeout time to wait for send completion
3395 *
3396 * Return Value: None
3397 */
3398 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3399 {
3400 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3401 unsigned long orig_jiffies, char_time;
3402
3403 if (!info )
3404 return;
3405
3406 if (debug_level >= DEBUG_LEVEL_INFO)
3407 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3408 __FILE__,__LINE__, info->device_name );
3409
3410 if (mgsl_paranoia_check(info, tty->device, "mgsl_wait_until_sent"))
3411 return;
3412
3413 if (!(info->flags & ASYNC_INITIALIZED))
3414 goto exit;
3415
3416 orig_jiffies = jiffies;
3417
3418 /* Set check interval to 1/5 of estimated time to
3419 * send a character, and make it at least 1. The check
3420 * interval should also be less than the timeout.
3421 * Note: use tight timings here to satisfy the NIST-PCTS.
3422 */
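	/* Worked example (illustrative only, assuming HZ=100): for a
	 * 5 jiffy info->timeout (e.g. 9600 bps, 10 bits per character),
	 * char_time = 5/(32*5) = 0, which is then bumped to 1 jiffy.
	 */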
3423
3424 if ( info->params.data_rate ) {
3425 char_time = info->timeout/(32 * 5);
3426 if (!char_time)
3427 char_time++;
3428 } else
3429 char_time = 1;
3430
3431 if (timeout)
3432 char_time = MIN(char_time, timeout);
3433
3434 if ( info->params.mode == MGSL_MODE_HDLC ||
3435 info->params.mode == MGSL_MODE_RAW ) {
3436 while (info->tx_active) {
3437 set_current_state(TASK_INTERRUPTIBLE);
3438 schedule_timeout(char_time);
3439 if (signal_pending(current))
3440 break;
3441 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3442 break;
3443 }
3444 } else {
3445 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3446 info->tx_enabled) {
3447 set_current_state(TASK_INTERRUPTIBLE);
3448 schedule_timeout(char_time);
3449 if (signal_pending(current))
3450 break;
3451 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3452 break;
3453 }
3454 }
3455
3456 exit:
3457 if (debug_level >= DEBUG_LEVEL_INFO)
3458 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3459 __FILE__,__LINE__, info->device_name );
3460
3461 } /* end of mgsl_wait_until_sent() */
3462
3463 /* mgsl_hangup()
3464 *
3465 * Called by tty_hangup() when a hangup is signaled.
3466 * This is the same as closing all open files for the port.
3467 *
3468 * Arguments: tty pointer to associated tty object
3469 * Return Value: None
3470 */
3471 static void mgsl_hangup(struct tty_struct *tty)
3472 {
3473 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3474
3475 if (debug_level >= DEBUG_LEVEL_INFO)
3476 printk("%s(%d):mgsl_hangup(%s)\n",
3477 __FILE__,__LINE__, info->device_name );
3478
3479 if (mgsl_paranoia_check(info, tty->device, "mgsl_hangup"))
3480 return;
3481
3482 mgsl_flush_buffer(tty);
3483 shutdown(info);
3484
3485 info->count = 0;
3486 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE);
3487 info->tty = 0;
3488
3489 wake_up_interruptible(&info->open_wait);
3490
3491 } /* end of mgsl_hangup() */
3492
3493 /* block_til_ready()
3494 *
3495 * Block the current process until the specified port
3496 * is ready to be opened.
3497 *
3498 * Arguments:
3499 *
3500 * tty pointer to tty info structure
3501 * filp pointer to open file object
3502 * info pointer to device instance data
3503 *
3504 * Return Value: 0 if success, otherwise error code
3505 */
3506 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3507 struct mgsl_struct *info)
3508 {
3509 DECLARE_WAITQUEUE(wait, current);
3510 int retval;
3511 int do_clocal = 0, extra_count = 0;
3512 unsigned long flags;
3513
3514 if (debug_level >= DEBUG_LEVEL_INFO)
3515 printk("%s(%d):block_til_ready on %s\n",
3516 __FILE__,__LINE__, tty->driver.name );
3517
3518 if (tty->driver.subtype == SERIAL_TYPE_CALLOUT) {
3519 /* this is a callout device */
3520 /* just verify that normal device is not in use */
3521 if (info->flags & ASYNC_NORMAL_ACTIVE)
3522 return -EBUSY;
3523 if ((info->flags & ASYNC_CALLOUT_ACTIVE) &&
3524 (info->flags & ASYNC_SESSION_LOCKOUT) &&
3525 (info->session != current->session))
3526 return -EBUSY;
3527 if ((info->flags & ASYNC_CALLOUT_ACTIVE) &&
3528 (info->flags & ASYNC_PGRP_LOCKOUT) &&
3529 (info->pgrp != current->pgrp))
3530 return -EBUSY;
3531 info->flags |= ASYNC_CALLOUT_ACTIVE;
3532 return 0;
3533 }
3534
3535 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3536 /* nonblock mode is set or port is not enabled */
3537 /* just verify that callout device is not active */
3538 if (info->flags & ASYNC_CALLOUT_ACTIVE)
3539 return -EBUSY;
3540 info->flags |= ASYNC_NORMAL_ACTIVE;
3541 return 0;
3542 }
3543
3544 if (info->flags & ASYNC_CALLOUT_ACTIVE) {
3545 if (info->normal_termios.c_cflag & CLOCAL)
3546 do_clocal = 1;
3547 } else {
3548 if (tty->termios->c_cflag & CLOCAL)
3549 do_clocal = 1;
3550 }
3551
3552 /* Wait for carrier detect and the line to become
3553 * free (i.e., not in use by the callout). While we are in
3554 * this loop, info->count is dropped by one, so that
3555 * mgsl_close() knows when to free things. We restore it upon
3556 * exit, either normal or abnormal.
3557 */
3558
3559 retval = 0;
3560 add_wait_queue(&info->open_wait, &wait);
3561
3562 if (debug_level >= DEBUG_LEVEL_INFO)
3563 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3564 __FILE__,__LINE__, tty->driver.name, info->count );
3565
3566 save_flags(flags); cli();
3567 if (!tty_hung_up_p(filp)) {
3568 extra_count = 1;
3569 info->count--;
3570 }
3571 restore_flags(flags);
3572 info->blocked_open++;
3573
3574 while (1) {
3575 if (!(info->flags & ASYNC_CALLOUT_ACTIVE) &&
3576 (tty->termios->c_cflag & CBAUD)) {
3577 spin_lock_irqsave(&info->irq_spinlock,flags);
3578 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3579 usc_set_serial_signals(info);
3580 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3581 }
3582
3583 set_current_state(TASK_INTERRUPTIBLE);
3584
3585 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3586 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3587 -EAGAIN : -ERESTARTSYS;
3588 break;
3589 }
3590
3591 spin_lock_irqsave(&info->irq_spinlock,flags);
3592 usc_get_serial_signals(info);
3593 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3594
3595 if (!(info->flags & ASYNC_CALLOUT_ACTIVE) &&
3596 !(info->flags & ASYNC_CLOSING) &&
3597 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3598 break;
3599 }
3600
3601 if (signal_pending(current)) {
3602 retval = -ERESTARTSYS;
3603 break;
3604 }
3605
3606 if (debug_level >= DEBUG_LEVEL_INFO)
3607 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3608 __FILE__,__LINE__, tty->driver.name, info->count );
3609
3610 schedule();
3611 }
3612
3613 set_current_state(TASK_RUNNING);
3614 remove_wait_queue(&info->open_wait, &wait);
3615
3616 if (extra_count)
3617 info->count++;
3618 info->blocked_open--;
3619
3620 if (debug_level >= DEBUG_LEVEL_INFO)
3621 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3622 __FILE__,__LINE__, tty->driver.name, info->count );
3623
3624 if (!retval)
3625 info->flags |= ASYNC_NORMAL_ACTIVE;
3626
3627 return retval;
3628
3629 } /* end of block_til_ready() */
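/* Illustrative sketch (not part of the original driver): a minimal user-space
 * fragment showing how the two open paths above are exercised. The device
 * node name /dev/ttySL0 is an assumption based on the "ttySL%d" naming used
 * in mgsl_add_device(); adjust it for the actual node on a given system.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int open_synclink(int nonblock)
{
	/* O_NONBLOCK takes the early-return path in block_til_ready();
	 * a plain open() sleeps there until DCD is raised or CLOCAL is set. */
	int flags = O_RDWR | O_NOCTTY | (nonblock ? O_NONBLOCK : 0);
	return open("/dev/ttySL0", flags);
}
#endif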
3630
3631 /* mgsl_open()
3632 *
3633 * Called when a port is opened. Init and enable port.
3634 * Perform serial-specific initialization for the tty structure.
3635 *
3636 * Arguments: tty pointer to tty info structure
3637 * filp associated file pointer
3638 *
3639 * Return Value: 0 if success, otherwise error code
3640 */
3641 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3642 {
3643 struct mgsl_struct *info;
3644 int retval, line;
3645 unsigned long page;
3646 unsigned long flags;
3647
3648 /* verify range of specified line number */
3649 line = MINOR(tty->device) - tty->driver.minor_start;
3650 if ((line < 0) || (line >= mgsl_device_count)) {
3651 printk("%s(%d):mgsl_open with illegal line #%d.\n",
3652 __FILE__,__LINE__,line);
3653 return -ENODEV;
3654 }
3655
3656 /* find the info structure for the specified line */
3657 info = mgsl_device_list;
3658 while(info && info->line != line)
3659 info = info->next_device;
3660 if (mgsl_paranoia_check(info, tty->device, "mgsl_open"))
3661 return -ENODEV;
3662
3663 tty->driver_data = info;
3664 info->tty = tty;
3665
3666 if (debug_level >= DEBUG_LEVEL_INFO)
3667 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3668 __FILE__,__LINE__,tty->driver.name, info->count);
3669
3670 MOD_INC_USE_COUNT;
3671
3672 /* If port is closing, signal caller to try again */
3673 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3674 if (info->flags & ASYNC_CLOSING)
3675 interruptible_sleep_on(&info->close_wait);
3676 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3677 -EAGAIN : -ERESTARTSYS);
3678 goto cleanup;
3679 }
3680
3681 if (!tmp_buf) {
3682 page = get_free_page(GFP_KERNEL);
3683 if (!page) {
3684 retval = -ENOMEM;
3685 goto cleanup;
3686 }
3687 if (tmp_buf)
3688 free_page(page);
3689 else
3690 tmp_buf = (unsigned char *) page;
3691 }
3692
3693 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3694
3695 spin_lock_irqsave(&info->netlock, flags);
3696 if (info->netcount) {
3697 retval = -EBUSY;
3698 spin_unlock_irqrestore(&info->netlock, flags);
3699 goto cleanup;
3700 }
3701 info->count++;
3702 spin_unlock_irqrestore(&info->netlock, flags);
3703
3704 if (info->count == 1) {
3705 /* 1st open on this device, init hardware */
3706 retval = startup(info);
3707 if (retval < 0)
3708 goto cleanup;
3709 }
3710
3711 retval = block_til_ready(tty, filp, info);
3712 if (retval) {
3713 if (debug_level >= DEBUG_LEVEL_INFO)
3714 printk("%s(%d):block_til_ready(%s) returned %d\n",
3715 __FILE__,__LINE__, info->device_name, retval);
3716 goto cleanup;
3717 }
3718
3719 if ((info->count == 1) &&
3720 info->flags & ASYNC_SPLIT_TERMIOS) {
3721 if (tty->driver.subtype == SERIAL_TYPE_NORMAL)
3722 *tty->termios = info->normal_termios;
3723 else
3724 *tty->termios = info->callout_termios;
3725 mgsl_change_params(info);
3726 }
3727
3728 info->session = current->session;
3729 info->pgrp = current->pgrp;
3730
3731 if (debug_level >= DEBUG_LEVEL_INFO)
3732 printk("%s(%d):mgsl_open(%s) success\n",
3733 __FILE__,__LINE__, info->device_name);
3734 retval = 0;
3735
3736 cleanup:
3737 if (retval) {
3738 if (tty->count == 1)
3739 info->tty = 0; /* tty layer will release tty struct */
3740 if(MOD_IN_USE)
3741 MOD_DEC_USE_COUNT;
3742 if(info->count)
3743 info->count--;
3744 }
3745
3746 return retval;
3747
3748 } /* end of mgsl_open() */
3749
3750 /*
3751 * /proc fs routines....
3752 */
3753
3754 static inline int line_info(char *buf, struct mgsl_struct *info)
3755 {
3756 char stat_buf[30];
3757 int ret;
3758 unsigned long flags;
3759
3760 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3761 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3762 info->device_name, info->io_base, info->irq_level,
3763 info->phys_memory_base, info->phys_lcr_base);
3764 } else {
3765 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3766 info->device_name, info->io_base,
3767 info->irq_level, info->dma_level);
3768 }
3769
3770 /* output current serial signal states */
3771 spin_lock_irqsave(&info->irq_spinlock,flags);
3772 usc_get_serial_signals(info);
3773 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3774
3775 stat_buf[0] = 0;
3776 stat_buf[1] = 0;
3777 if (info->serial_signals & SerialSignal_RTS)
3778 strcat(stat_buf, "|RTS");
3779 if (info->serial_signals & SerialSignal_CTS)
3780 strcat(stat_buf, "|CTS");
3781 if (info->serial_signals & SerialSignal_DTR)
3782 strcat(stat_buf, "|DTR");
3783 if (info->serial_signals & SerialSignal_DSR)
3784 strcat(stat_buf, "|DSR");
3785 if (info->serial_signals & SerialSignal_DCD)
3786 strcat(stat_buf, "|CD");
3787 if (info->serial_signals & SerialSignal_RI)
3788 strcat(stat_buf, "|RI");
3789
3790 if (info->params.mode == MGSL_MODE_HDLC ||
3791 info->params.mode == MGSL_MODE_RAW ) {
3792 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3793 info->icount.txok, info->icount.rxok);
3794 if (info->icount.txunder)
3795 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3796 if (info->icount.txabort)
3797 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3798 if (info->icount.rxshort)
3799 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3800 if (info->icount.rxlong)
3801 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3802 if (info->icount.rxover)
3803 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3804 if (info->icount.rxcrc)
3805 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3806 } else {
3807 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3808 info->icount.tx, info->icount.rx);
3809 if (info->icount.frame)
3810 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3811 if (info->icount.parity)
3812 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3813 if (info->icount.brk)
3814 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3815 if (info->icount.overrun)
3816 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3817 }
3818
3819 /* Append serial signal status to end */
3820 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3821
3822 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3823 info->tx_active,info->bh_requested,info->bh_running,
3824 info->pending_bh);
3825
3826 spin_lock_irqsave(&info->irq_spinlock,flags);
3827 {
3828 u16 Tcsr = usc_InReg( info, TCSR );
3829 u16 Tdmr = usc_InDmaReg( info, TDMR );
3830 u16 Ticr = usc_InReg( info, TICR );
3831 u16 Rscr = usc_InReg( info, RCSR );
3832 u16 Rdmr = usc_InDmaReg( info, RDMR );
3833 u16 Ricr = usc_InReg( info, RICR );
3834 u16 Icr = usc_InReg( info, ICR );
3835 u16 Dccr = usc_InReg( info, DCCR );
3836 u16 Tmr = usc_InReg( info, TMR );
3837 u16 Tccr = usc_InReg( info, TCCR );
3838 u16 Ccar = inw( info->io_base + CCAR );
3839 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3840 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3841 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3842 }
3843 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3844
3845 return ret;
3846
3847 } /* end of line_info() */
3848
3849 /* mgsl_read_proc()
3850 *
3851 * Called to print information about devices
3852 *
3853 * Arguments:
3854 * page page of memory to hold returned info
3855 * start where returned data starts (set relative to page)
3856 * off offset into the /proc file contents
3857 * count maximum number of bytes to return
3858 * eof set when all device info has been output
3859 * data unused
3860 *
3861 * Return Value: number of bytes written to page
3862 */
3863 int mgsl_read_proc(char *page, char **start, off_t off, int count,
3864 int *eof, void *data)
3865 {
3866 int len = 0, l;
3867 off_t begin = 0;
3868 struct mgsl_struct *info;
3869
3870 len += sprintf(page, "synclink driver:%s\n", driver_version);
3871
3872 info = mgsl_device_list;
3873 while( info ) {
3874 l = line_info(page + len, info);
3875 len += l;
3876 if (len+begin > off+count)
3877 goto done;
3878 if (len+begin < off) {
3879 begin += len;
3880 len = 0;
3881 }
3882 info = info->next_device;
3883 }
3884
3885 *eof = 1;
3886 done:
3887 if (off >= len+begin)
3888 return 0;
3889 *start = page + (off-begin);
3890 return ((count < begin+len-off) ? count : begin+len-off);
3891
3892 } /* end of mgsl_read_proc() */
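/* Usage note (expected behavior, not verified in this file): because
 * read_proc is hooked into the tty driver in mgsl_init_tty(), the per-device
 * lines built by line_info() are normally readable through the tty layer's
 * proc entry, e.g.
 *
 *	cat /proc/tty/driver/synclink
 *
 * The exact path follows the tty core's driver_name convention.
 */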
3893
3894 /* mgsl_allocate_dma_buffers()
3895 *
3896 * Allocate and format DMA buffers (ISA adapter)
3897 * or format shared memory buffers (PCI adapter).
3898 *
3899 * Arguments: info pointer to device instance data
3900 * Return Value: 0 if success, otherwise error
3901 */
3902 int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3903 {
3904 unsigned short BuffersPerFrame;
3905
3906 info->last_mem_alloc = 0;
3907
3908 /* Calculate the number of DMA buffers necessary to hold the */
3909 /* largest allowable frame size. Note: If the max frame size is */
3910 /* not an even multiple of the DMA buffer size then we need to */
3911 /* round the buffer count per frame up one. */
3912
3913 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3914 if ( info->max_frame_size % DMABUFFERSIZE )
3915 BuffersPerFrame++;
3916
3917 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3918 /*
3919 * The PCI adapter has 256KBytes of shared memory to use.
3920 * This is 64 PAGE_SIZE buffers.
3921 *
3922 * The first page is used for padding at this time so the
3923 * buffer list does not begin at offset 0 of the PCI
3924 * adapter's shared memory.
3925 *
3926 * The 2nd page is used for the buffer list. A 4K buffer
3927 * list can hold 128 DMA_BUFFER structures at 32 bytes
3928 * each.
3929 *
3930 * This leaves 62 4K pages.
3931 *
3932 * The next N pages are used for transmit frame(s). We
3933 * reserve enough 4K page blocks to hold the required
3934 * number of transmit dma buffers (num_tx_dma_buffers),
3935 * each of MaxFrameSize size.
3936 *
3937 * Of the remaining pages (62-N), determine how many can
3938 * be used to receive full MaxFrameSize inbound frames
3939 */
3940 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3941 info->rx_buffer_count = 62 - info->tx_buffer_count;
3942 } else {
3943 /* Calculate the number of PAGE_SIZE buffers needed for */
3944 /* receive and transmit DMA buffers. */
3945
3946
3947 /* Calculate the number of DMA buffers necessary to */
3948 /* hold 7 max size receive frames and one max size transmit frame. */
3949 /* The receive buffer count is padded with a few extra buffers to avoid an */
3950 /* End of List condition if all receive buffers are used when */
3951 /* using linked list DMA buffers. */
3952
3953 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3954 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3955
3956 /*
3957 * limit total TxBuffers & RxBuffers to 62 4K total
3958 * (ala PCI Allocation)
3959 */
3960
3961 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3962 info->rx_buffer_count = 62 - info->tx_buffer_count;
3963
3964 }
3965
3966 if ( debug_level >= DEBUG_LEVEL_INFO )
3967 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3968 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3969
3970 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3971 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3972 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3973 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3974 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3975 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3976 return -ENOMEM;
3977 }
3978
3979 mgsl_reset_rx_dma_buffers( info );
3980 mgsl_reset_tx_dma_buffers( info );
3981
3982 return 0;
3983
3984 } /* end of mgsl_allocate_dma_buffers() */
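/* Worked example of the sizing above (illustrative; assumes the defaults set
 * in mgsl_allocate_device(): max_frame_size = 4096, num_tx_dma_buffers = 1,
 * and a DMABUFFERSIZE of one 4K page as implied by the PCI layout comment):
 *
 *	BuffersPerFrame = 4096/4096 = 1
 *	PCI:  tx_buffer_count = 1*1 = 1, rx_buffer_count = 62 - 1 = 61
 *	ISA:  tx_buffer_count = 1, rx_buffer_count = 1*MAXRXFRAMES + 6,
 *	      then clipped so tx_buffer_count + rx_buffer_count <= 62
 */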
3985
3986 /*
3987 * mgsl_alloc_buffer_list_memory()
3988 *
3989 * Allocate a common DMA buffer for use as the
3990 * receive and transmit buffer lists.
3991 *
3992 * A buffer list is a set of buffer entries where each entry contains
3993 * a pointer to an actual buffer and a pointer to the next buffer entry
3994 * (plus some other info about the buffer).
3995 *
3996 * The buffer entries for a list are built to form a circular list so
3997 * that when the entire list has been traversed you start back at the
3998 * beginning.
3999 *
4000 * This function allocates memory for just the buffer entries.
4001 * The links (pointer to next entry) are filled in with the physical
4002 * address of the next entry so the adapter can navigate the list
4003 * using bus master DMA. The pointers to the actual buffers are filled
4004 * out later when the actual buffers are allocated.
4005 *
4006 * Arguments: info pointer to device instance data
4007 * Return Value: 0 if success, otherwise error
4008 */
4009 int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
4010 {
4011 unsigned int i;
4012
4013 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4014 /* PCI adapter uses shared memory. */
4015 info->buffer_list = info->memory_base + info->last_mem_alloc;
4016 info->buffer_list_phys = info->last_mem_alloc;
4017 info->last_mem_alloc += BUFFERLISTSIZE;
4018 } else {
4019 /* ISA adapter uses system memory. */
4020 /* The buffer lists are allocated as a common buffer that both */
4021 /* the processor and adapter can access. This allows the driver to */
4022 /* inspect portions of the buffer while other portions are being */
4023 /* updated by the adapter using Bus Master DMA. */
4024
4025 info->buffer_list = kmalloc(BUFFERLISTSIZE, GFP_KERNEL | GFP_DMA);
4026 if ( info->buffer_list == NULL )
4027 return -ENOMEM;
4028
4029 info->buffer_list_phys = virt_to_bus(info->buffer_list);
4030 }
4031
4032 /* We got the memory for the buffer entry lists. */
4033 /* Initialize the memory block to all zeros. */
4034 memset( info->buffer_list, 0, BUFFERLISTSIZE );
4035
4036 /* Save virtual address pointers to the receive and */
4037 /* transmit buffer lists. (Receive 1st). These pointers will */
4038 /* be used by the processor to access the lists. */
4039 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
4040 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
4041 info->tx_buffer_list += info->rx_buffer_count;
4042
4043 /*
4044 * Build the links for the buffer entry lists such that
4045 * two circular lists are built. (Transmit and Receive).
4046 *
4047 * Note: the links are physical addresses
4048 * which are read by the adapter to determine the next
4049 * buffer entry to use.
4050 */
4051
4052 for ( i = 0; i < info->rx_buffer_count; i++ ) {
4053 /* calculate and store physical address of this buffer entry */
4054 info->rx_buffer_list[i].phys_entry =
4055 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
4056
4057 /* calculate and store physical address of */
4058 /* next entry in circular list of entries */
4059
4060 info->rx_buffer_list[i].link = info->buffer_list_phys;
4061
4062 if ( i < info->rx_buffer_count - 1 )
4063 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
4064 }
4065
4066 for ( i = 0; i < info->tx_buffer_count; i++ ) {
4067 /* calculate and store physical address of this buffer entry */
4068 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
4069 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
4070
4071 /* calculate and store physical address of */
4072 /* next entry in circular list of entries */
4073
4074 info->tx_buffer_list[i].link = info->buffer_list_phys +
4075 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
4076
4077 if ( i < info->tx_buffer_count - 1 )
4078 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
4079 }
4080
4081 return 0;
4082
4083 } /* end of mgsl_alloc_buffer_list_memory() */
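/* Link layout sketch (illustrative): with buffer_list_phys == B and entry
 * size S == sizeof(DMABUFFERENTRY) (32 bytes per the layout comment in
 * mgsl_allocate_dma_buffers), an rx list of three entries is chained as
 *
 *	rx[0].link = B + 1*S
 *	rx[1].link = B + 2*S
 *	rx[2].link = B		(wraps to the head -> circular list)
 *
 * and the tx list starts at B + rx_buffer_count*S with the same wrap back
 * to its own head.
 */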
4084
4085 /* Free DMA buffers allocated for use as the
4086 * receive and transmit buffer lists.
4087 * Warning:
4088 *
4089 * The data transfer buffers associated with the buffer list
4090 * MUST be freed before freeing the buffer list itself because
4091 * the buffer list contains the information necessary to free
4092 * the individual buffers!
4093 */
4094 void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
4095 {
4096 if ( info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI )
4097 kfree(info->buffer_list);
4098
4099 info->buffer_list = NULL;
4100 info->rx_buffer_list = NULL;
4101 info->tx_buffer_list = NULL;
4102
4103 } /* end of mgsl_free_buffer_list_memory() */
4104
4105 /*
4106 * mgsl_alloc_frame_memory()
4107 *
4108 * Allocate the frame DMA buffers used by the specified buffer list.
4109 * Each DMA buffer will be one memory page in size. This is necessary
4110 * because memory can fragment enough that it may be impossible
4111 * to allocate larger blocks of contiguous pages.
4112 *
4113 * Arguments:
4114 *
4115 * info pointer to device instance data
4116 * BufferList pointer to list of buffer entries
4117 * Buffercount count of buffer entries in buffer list
4118 *
4119 * Return Value: 0 if success, otherwise -ENOMEM
4120 */
4121 int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
4122 {
4123 int i;
4124 unsigned long phys_addr;
4125
4126 /* Allocate page sized buffers for the receive buffer list */
4127
4128 for ( i = 0; i < Buffercount; i++ ) {
4129 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4130 /* PCI adapter uses shared memory buffers. */
4131 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
4132 phys_addr = info->last_mem_alloc;
4133 info->last_mem_alloc += DMABUFFERSIZE;
4134 } else {
4135 /* ISA adapter uses system memory. */
4136 BufferList[i].virt_addr =
4137 kmalloc(DMABUFFERSIZE, GFP_KERNEL | GFP_DMA);
4138 if ( BufferList[i].virt_addr == NULL )
4139 return -ENOMEM;
4140 phys_addr = virt_to_bus(BufferList[i].virt_addr);
4141 }
4142 BufferList[i].phys_addr = phys_addr;
4143 }
4144
4145 return 0;
4146
4147 } /* end of mgsl_alloc_frame_memory() */
4148
4149 /*
4150 * mgsl_free_frame_memory()
4151 *
4152 * Free the buffers associated with
4153 * each buffer entry of a buffer list.
4154 *
4155 * Arguments:
4156 *
4157 * info pointer to device instance data
4158 * BufferList pointer to list of buffer entries
4159 * Buffercount count of buffer entries in buffer list
4160 *
4161 * Return Value: None
4162 */
4163 void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
4164 {
4165 int i;
4166
4167 if ( BufferList ) {
4168 for ( i = 0 ; i < Buffercount ; i++ ) {
4169 if ( BufferList[i].virt_addr ) {
4170 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
4171 kfree(BufferList[i].virt_addr);
4172 BufferList[i].virt_addr = NULL;
4173 }
4174 }
4175 }
4176
4177 } /* end of mgsl_free_frame_memory() */
4178
4179 /* mgsl_free_dma_buffers()
4180 *
4181 * Free DMA buffers
4182 *
4183 * Arguments: info pointer to device instance data
4184 * Return Value: None
4185 */
4186 void mgsl_free_dma_buffers( struct mgsl_struct *info )
4187 {
4188 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
4189 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
4190 mgsl_free_buffer_list_memory( info );
4191
4192 } /* end of mgsl_free_dma_buffers() */
4193
4194
4195 /*
4196 * mgsl_alloc_intermediate_rxbuffer_memory()
4197 *
4198 * Allocate a buffer large enough to hold max_frame_size. This buffer
4199 * is used to pass an assembled frame to the line discipline.
4200 *
4201 * Arguments:
4202 *
4203 * info pointer to device instance data
4204 *
4205 * Return Value: 0 if success, otherwise -ENOMEM
4206 */
4207 int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4208 {
4209 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
4210 if ( info->intermediate_rxbuffer == NULL )
4211 return -ENOMEM;
4212
4213 return 0;
4214
4215 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
4216
4217 /*
4218 * mgsl_free_intermediate_rxbuffer_memory()
4219 *
4220 *
4221 * Arguments:
4222 *
4223 * info pointer to device instance data
4224 *
4225 * Return Value: None
4226 */
4227 void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4228 {
4229 if ( info->intermediate_rxbuffer )
4230 kfree(info->intermediate_rxbuffer);
4231
4232 info->intermediate_rxbuffer = NULL;
4233
4234 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
4235
4236 /*
4237 * mgsl_alloc_intermediate_txbuffer_memory()
4238 *
4239 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
4240 * These buffers hold transmit frames until there is sufficient space in the
4241 * adapter's dma transfer buffers to load them.
4242 *
4243 * Arguments:
4244 *
4245 * info pointer to device instance data
4246 *
4247 * Return Value: 0 if success, otherwise -ENOMEM
4248 */
4249 int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4250 {
4251 int i;
4252
4253 if ( debug_level >= DEBUG_LEVEL_INFO )
4254 printk("%s %s(%d) allocating %d tx holding buffers\n",
4255 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4256
4257 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4258
4259 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4260 info->tx_holding_buffers[i].buffer =
4261 kmalloc(info->max_frame_size, GFP_KERNEL);
4262 if ( info->tx_holding_buffers[i].buffer == NULL )
4263 return -ENOMEM;
4264 }
4265
4266 return 0;
4267
4268 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4269
4270 /*
4271 * mgsl_free_intermediate_txbuffer_memory()
4272 *
4273 *
4274 * Arguments:
4275 *
4276 * info pointer to device instance data
4277 *
4278 * Return Value: None
4279 */
4280 void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4281 {
4282 int i;
4283
4284 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4285 if ( info->tx_holding_buffers[i].buffer ) {
4286 kfree(info->tx_holding_buffers[i].buffer);
4287 info->tx_holding_buffers[i].buffer=NULL;
4288 }
4289 }
4290
4291 info->get_tx_holding_index = 0;
4292 info->put_tx_holding_index = 0;
4293 info->tx_holding_count = 0;
4294
4295 } /* end of mgsl_free_intermediate_txbuffer_memory() */
4296
4297
4298 /*
4299 * load_next_tx_holding_buffer()
4300 *
4301 * attempts to load the next buffered tx request into the
4302 * tx dma buffers
4303 *
4304 * Arguments:
4305 *
4306 * info pointer to device instance data
4307 *
4308 * Return Value: 1 if next buffered tx request loaded
4309 * into adapter's tx dma buffer,
4310 * 0 otherwise
4311 */
4312 int load_next_tx_holding_buffer(struct mgsl_struct *info)
4313 {
4314 int ret = 0;
4315
4316 if ( info->tx_holding_count ) {
4317 /* determine if we have enough tx dma buffers
4318 * to accommodate the next tx frame
4319 */
4320 struct tx_holding_buffer *ptx =
4321 &info->tx_holding_buffers[info->get_tx_holding_index];
4322 int num_free = num_free_tx_dma_buffers(info);
4323 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4324 if ( ptx->buffer_size % DMABUFFERSIZE )
4325 ++num_needed;
4326
4327 if (num_needed <= num_free) {
4328 info->xmit_cnt = ptx->buffer_size;
4329 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4330
4331 --info->tx_holding_count;
4332 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4333 info->get_tx_holding_index=0;
4334
4335 /* restart transmit timer */
4336 mod_timer(&info->tx_timer, jiffies + jiffies_from_ms(5000));
4337
4338 ret = 1;
4339 }
4340 }
4341
4342 return ret;
4343 }
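/* Sizing example for the check above (illustrative, assuming a 4K
 * DMABUFFERSIZE): a 5000 byte frame needs 5000/4096 = 1 buffer plus one more
 * for the 904 byte remainder, so it is only loaded once
 * num_free_tx_dma_buffers() reports at least 2 free buffers.
 */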
4344
4345 /*
4346 * save_tx_buffer_request()
4347 *
4348 * attempt to store transmit frame request for later transmission
4349 *
4350 * Arguments:
4351 *
4352 * info pointer to device instance data
4353 * Buffer pointer to buffer containing frame to load
4354 * BufferSize size in bytes of frame in Buffer
4355 *
4356 * Return Value: 1 if able to store, 0 otherwise
4357 */
4358 int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4359 {
4360 struct tx_holding_buffer *ptx;
4361
4362 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4363 return 0; /* all buffers in use */
4364 }
4365
4366 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4367 ptx->buffer_size = BufferSize;
4368 memcpy( ptx->buffer, Buffer, BufferSize);
4369
4370 ++info->tx_holding_count;
4371 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4372 info->put_tx_holding_index=0;
4373
4374 return 1;
4375 }
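/* Holding buffer ring behaviour (descriptive): with num_tx_holding_buffers
 * of 3, save_tx_buffer_request() fills slots at put_tx_holding_index
 * 0,1,2,0,... and load_next_tx_holding_buffer() drains them in the same
 * order from get_tx_holding_index, with tx_holding_count tracking how many
 * slots are currently occupied.
 */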
4376
4377 int mgsl_claim_resources(struct mgsl_struct *info)
4378 {
4379 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4380 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4381 __FILE__,__LINE__,info->device_name, info->io_base);
4382 return -ENODEV;
4383 }
4384 info->io_addr_requested = 1;
4385
4386 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4387 info->device_name, info ) < 0 ) {
4388 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4389 __FILE__,__LINE__,info->device_name, info->irq_level );
4390 goto errout;
4391 }
4392 info->irq_requested = 1;
4393
4394 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4395 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4396 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4397 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4398 goto errout;
4399 }
4400 info->shared_mem_requested = 1;
4401 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4402 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4403 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4404 goto errout;
4405 }
4406 info->lcr_mem_requested = 1;
4407
4408 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4409 if (!info->memory_base) {
4410 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4411 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4412 goto errout;
4413 }
4414
4415 if ( !mgsl_memory_test(info) ) {
4416 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4417 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4418 goto errout;
4419 }
4420
4421 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4422 if (!info->lcr_base) {
4423 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4424 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4425 goto errout;
4426 }
4427
4428 } else {
4429 /* claim DMA channel */
4430
4431 if (request_dma(info->dma_level,info->device_name) < 0){
4432 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4433 __FILE__,__LINE__,info->device_name, info->dma_level );
4434 mgsl_release_resources( info );
4435 return -ENODEV;
4436 }
4437 info->dma_requested = 1;
4438
4439 /* ISA adapter uses bus master DMA */
4440 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4441 enable_dma(info->dma_level);
4442 }
4443
4444 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4445 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4446 __FILE__,__LINE__,info->device_name, info->dma_level );
4447 goto errout;
4448 }
4449
4450 return 0;
4451 errout:
4452 mgsl_release_resources(info);
4453 return -ENODEV;
4454
4455 } /* end of mgsl_claim_resources() */
4456
4457 void mgsl_release_resources(struct mgsl_struct *info)
4458 {
4459 if ( debug_level >= DEBUG_LEVEL_INFO )
4460 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4461 __FILE__,__LINE__,info->device_name );
4462
4463 if ( info->irq_requested ) {
4464 free_irq(info->irq_level, info);
4465 info->irq_requested = 0;
4466 }
4467 if ( info->dma_requested ) {
4468 disable_dma(info->dma_level);
4469 free_dma(info->dma_level);
4470 info->dma_requested = 0;
4471 }
4472 mgsl_free_dma_buffers(info);
4473 mgsl_free_intermediate_rxbuffer_memory(info);
4474 mgsl_free_intermediate_txbuffer_memory(info);
4475
4476 if ( info->io_addr_requested ) {
4477 release_region(info->io_base,info->io_addr_size);
4478 info->io_addr_requested = 0;
4479 }
4480 if ( info->shared_mem_requested ) {
4481 release_mem_region(info->phys_memory_base,0x40000);
4482 info->shared_mem_requested = 0;
4483 }
4484 if ( info->lcr_mem_requested ) {
4485 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4486 info->lcr_mem_requested = 0;
4487 }
4488 if (info->memory_base){
4489 iounmap(info->memory_base);
4490 info->memory_base = 0;
4491 }
4492 if (info->lcr_base){
4493 iounmap(info->lcr_base - info->lcr_offset);
4494 info->lcr_base = 0;
4495 }
4496
4497 if ( debug_level >= DEBUG_LEVEL_INFO )
4498 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4499 __FILE__,__LINE__,info->device_name );
4500
4501 } /* end of mgsl_release_resources() */
4502
4503 /* mgsl_add_device()
4504 *
4505 * Add the specified device instance data structure to the
4506 * global linked list of devices and increment the device count.
4507 *
4508 * Arguments: info pointer to device instance data
4509 * Return Value: None
4510 */
4511 void mgsl_add_device( struct mgsl_struct *info )
4512 {
4513 info->next_device = NULL;
4514 info->line = mgsl_device_count;
4515 sprintf(info->device_name,"ttySL%d",info->line);
4516
4517 if (info->line < MAX_TOTAL_DEVICES) {
4518 if (maxframe[info->line])
4519 info->max_frame_size = maxframe[info->line];
4520 info->dosyncppp = dosyncppp[info->line];
4521
4522 if (txdmabufs[info->line]) {
4523 info->num_tx_dma_buffers = txdmabufs[info->line];
4524 if (info->num_tx_dma_buffers < 1)
4525 info->num_tx_dma_buffers = 1;
4526 }
4527
4528 if (txholdbufs[info->line]) {
4529 info->num_tx_holding_buffers = txholdbufs[info->line];
4530 if (info->num_tx_holding_buffers < 1)
4531 info->num_tx_holding_buffers = 1;
4532 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4533 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4534 }
4535 }
4536
4537 mgsl_device_count++;
4538
4539 if ( !mgsl_device_list )
4540 mgsl_device_list = info;
4541 else {
4542 struct mgsl_struct *current_dev = mgsl_device_list;
4543 while( current_dev->next_device )
4544 current_dev = current_dev->next_device;
4545 current_dev->next_device = info;
4546 }
4547
4548 if ( info->max_frame_size < 4096 )
4549 info->max_frame_size = 4096;
4550 else if ( info->max_frame_size > 65535 )
4551 info->max_frame_size = 65535;
4552
4553 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4554 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4555 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4556 info->phys_memory_base, info->phys_lcr_base,
4557 info->max_frame_size );
4558 } else {
4559 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4560 info->device_name, info->io_base, info->irq_level, info->dma_level,
4561 info->max_frame_size );
4562 }
4563
4564 #ifdef CONFIG_SYNCLINK_SYNCPPP
4565 #ifdef MODULE
4566 if (info->dosyncppp)
4567 #endif
4568 mgsl_sppp_init(info);
4569 #endif
4570 } /* end of mgsl_add_device() */
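/* Module option sketch (illustrative; exact syntax depends on the kernel's
 * module option handling): the per-line limits applied above come from the
 * maxframe[], txdmabufs[] and txholdbufs[] options, so something like
 *
 *	modprobe synclink maxframe=16384 txdmabufs=2 txholdbufs=4
 *
 * would size the first adapter for 16K frames with two transmit DMA frame
 * slots and four holding buffers, subject to the clamping done above.
 */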
4571
4572 /* mgsl_allocate_device()
4573 *
4574 * Allocate and initialize a device instance structure
4575 *
4576 * Arguments: none
4577 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4578 */
4579 struct mgsl_struct* mgsl_allocate_device()
4580 {
4581 struct mgsl_struct *info;
4582
4583 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4584 GFP_KERNEL);
4585
4586 if (!info) {
4587 printk("Error can't allocate device instance data\n");
4588 } else {
4589 memset(info, 0, sizeof(struct mgsl_struct));
4590 info->magic = MGSL_MAGIC;
4591 info->task.sync = 0;
4592 info->task.routine = mgsl_bh_handler;
4593 info->task.data = info;
4594 info->max_frame_size = 4096;
4595 info->close_delay = 5*HZ/10;
4596 info->closing_wait = 30*HZ;
4597 init_waitqueue_head(&info->open_wait);
4598 init_waitqueue_head(&info->close_wait);
4599 init_waitqueue_head(&info->status_event_wait_q);
4600 init_waitqueue_head(&info->event_wait_q);
4601 spin_lock_init(&info->irq_spinlock);
4602 spin_lock_init(&info->netlock);
4603 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4604 info->idle_mode = HDLC_TXIDLE_FLAGS;
4605 info->num_tx_dma_buffers = 1;
4606 info->num_tx_holding_buffers = 0;
4607 }
4608
4609 return info;
4610
4611 } /* end of mgsl_allocate_device()*/
4612
4613 /*
4614 * perform tty device initialization
4615 */
4616 int mgsl_init_tty(void);
4617 int mgsl_init_tty()
4618 {
4619 struct mgsl_struct *info;
4620
4621 memset(serial_table,0,sizeof(struct tty_struct*)*MAX_TOTAL_DEVICES);
4622 memset(serial_termios,0,sizeof(struct termios*)*MAX_TOTAL_DEVICES);
4623 memset(serial_termios_locked,0,sizeof(struct termios*)*MAX_TOTAL_DEVICES);
4624
4625 /* Initialize the tty_driver structure */
4626
4627 memset(&serial_driver, 0, sizeof(struct tty_driver));
4628 serial_driver.magic = TTY_DRIVER_MAGIC;
4629 serial_driver.driver_name = "synclink";
4630 serial_driver.name = "ttySL";
4631 serial_driver.major = ttymajor;
4632 serial_driver.minor_start = 64;
4633 serial_driver.num = mgsl_device_count;
4634 serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
4635 serial_driver.subtype = SERIAL_TYPE_NORMAL;
4636 serial_driver.init_termios = tty_std_termios;
4637 serial_driver.init_termios.c_cflag =
4638 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4639 serial_driver.flags = TTY_DRIVER_REAL_RAW;
4640 serial_driver.refcount = &serial_refcount;
4641 serial_driver.table = serial_table;
4642 serial_driver.termios = serial_termios;
4643 serial_driver.termios_locked = serial_termios_locked;
4644
4645 serial_driver.open = mgsl_open;
4646 serial_driver.close = mgsl_close;
4647 serial_driver.write = mgsl_write;
4648 serial_driver.put_char = mgsl_put_char;
4649 serial_driver.flush_chars = mgsl_flush_chars;
4650 serial_driver.write_room = mgsl_write_room;
4651 serial_driver.chars_in_buffer = mgsl_chars_in_buffer;
4652 serial_driver.flush_buffer = mgsl_flush_buffer;
4653 serial_driver.ioctl = mgsl_ioctl;
4654 serial_driver.throttle = mgsl_throttle;
4655 serial_driver.unthrottle = mgsl_unthrottle;
4656 serial_driver.send_xchar = mgsl_send_xchar;
4657 serial_driver.break_ctl = mgsl_break;
4658 serial_driver.wait_until_sent = mgsl_wait_until_sent;
4659 serial_driver.read_proc = mgsl_read_proc;
4660 serial_driver.set_termios = mgsl_set_termios;
4661 serial_driver.stop = mgsl_stop;
4662 serial_driver.start = mgsl_start;
4663 serial_driver.hangup = mgsl_hangup;
4664
4665 /*
4666 * The callout device is just like normal device except for
4667 * major number and the subtype code.
4668 */
4669 callout_driver = serial_driver;
4670 callout_driver.name = "cuaSL";
4671 callout_driver.major = cuamajor;
4672 callout_driver.subtype = SERIAL_TYPE_CALLOUT;
4673 callout_driver.read_proc = 0;
4674 callout_driver.proc_entry = 0;
4675
4676 if (tty_register_driver(&serial_driver) < 0)
4677 printk("%s(%d):Couldn't register serial driver\n",
4678 __FILE__,__LINE__);
4679
4680 if (tty_register_driver(&callout_driver) < 0)
4681 printk("%s(%d):Couldn't register callout driver\n",
4682 __FILE__,__LINE__);
4683
4684 printk("%s %s, tty major#%d callout major#%d\n",
4685 driver_name, driver_version,
4686 serial_driver.major, callout_driver.major);
4687
4688 /* Propagate these values to all device instances */
4689
4690 info = mgsl_device_list;
4691 while(info){
4692 info->callout_termios = callout_driver.init_termios;
4693 info->normal_termios = serial_driver.init_termios;
4694 info = info->next_device;
4695 }
4696
4697 return 0;
4698 }
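/* Device node sketch (assumption; nodes may already be provided by the
 * distribution): with the driver registered as "ttySL"/"cuaSL" at
 * minor_start 64, nodes would typically be created as
 *
 *	mknod /dev/ttySL0 c <ttymajor> 64
 *	mknod /dev/cuaSL0 c <cuamajor> 64
 *
 * using the major numbers printed by the banner above.
 */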
4699
4700 /* enumerate user specified ISA adapters
4701 */
4702 int mgsl_enum_isa_devices()
4703 {
4704 struct mgsl_struct *info;
4705 int i;
4706
4707 /* Check for user specified ISA devices */
4708
4709 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4710 if ( debug_level >= DEBUG_LEVEL_INFO )
4711 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4712 io[i], irq[i], dma[i] );
4713
4714 info = mgsl_allocate_device();
4715 if ( !info ) {
4716 /* error allocating device instance data */
4717 if ( debug_level >= DEBUG_LEVEL_ERROR )
4718 printk( "can't allocate device instance data.\n");
4719 continue;
4720 }
4721
4722 /* Copy user configuration info to device instance data */
4723 info->io_base = (unsigned int)io[i];
4724 info->irq_level = (unsigned int)irq[i];
4725 info->irq_level = irq_cannonicalize(info->irq_level);
4726 info->dma_level = (unsigned int)dma[i];
4727 info->bus_type = MGSL_BUS_TYPE_ISA;
4728 info->io_addr_size = 16;
4729 info->irq_flags = 0;
4730
4731 mgsl_add_device( info );
4732 }
4733
4734 return 0;
4735 }
4736
4737 /* mgsl_init()
4738 *
4739 * Driver initialization entry point.
4740 *
4741 * Arguments: None
4742 * Return Value: 0 if success, otherwise error code
4743 */
4744 int __init mgsl_init(void)
4745 {
4746 int rc;
4747
4748 EXPORT_NO_SYMBOLS;
4749
4750 printk("%s %s\n", driver_name, driver_version);
4751
4752 mgsl_enum_isa_devices();
4753 pci_register_driver(&synclink_pci_driver);
4754
4755 if ( !mgsl_device_list ) {
4756 printk("%s(%d):No SyncLink devices found.\n",__FILE__,__LINE__);
4757 return -ENODEV;
4758 }
4759 if ((rc = mgsl_init_tty()))
4760 return rc;
4761
4762 return 0;
4763 }
4764
4765 static int __init synclink_init(void)
4766 {
4767 /* If the break_on_load option is set, break into the debugger when
4768 * the module loads. mgsl_get_text_ptr() leaves the .text address in eax,
4769 * which can be used with add-symbol-file in gdb.
4770 */
4771 if (break_on_load) {
4772 mgsl_get_text_ptr();
4773 BREAKPOINT();
4774 }
4775
4776 return mgsl_init();
4777 }
4778
4779 static void __exit synclink_exit(void)
4780 {
4781 unsigned long flags;
4782 int rc;
4783 struct mgsl_struct *info;
4784 struct mgsl_struct *tmp;
4785
4786 printk("Unloading %s: %s\n", driver_name, driver_version);
4787 save_flags(flags);
4788 cli();
4789 if ((rc = tty_unregister_driver(&serial_driver)))
4790 printk("%s(%d) failed to unregister tty driver err=%d\n",
4791 __FILE__,__LINE__,rc);
4792 if ((rc = tty_unregister_driver(&callout_driver)))
4793 printk("%s(%d) failed to unregister callout driver err=%d\n",
4794 __FILE__,__LINE__,rc);
4795 restore_flags(flags);
4796
4797 info = mgsl_device_list;
4798 while(info) {
4799 #ifdef CONFIG_SYNCLINK_SYNCPPP
4800 if (info->dosyncppp)
4801 mgsl_sppp_delete(info);
4802 #endif
4803 mgsl_release_resources(info);
4804 tmp = info;
4805 info = info->next_device;
4806 kfree(tmp);
4807 }
4808
4809 if (tmp_buf) {
4810 free_page((unsigned long) tmp_buf);
4811 tmp_buf = NULL;
4812 }
4813
4814 pci_unregister_driver(&synclink_pci_driver);
4815 }
4816
4817 module_init(synclink_init);
4818 module_exit(synclink_exit);
4819
4820 /*
4821 * usc_RTCmd()
4822 *
4823 * Issue a USC Receive/Transmit command to the
4824 * Channel Command/Address Register (CCAR).
4825 *
4826 * Notes:
4827 *
4828 * The command is encoded in the most significant 5 bits <15..11>
4829 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4830 * and Bits <6..0> must be written as zeros.
4831 *
4832 * Arguments:
4833 *
4834 * info pointer to device information structure
4835 * Cmd command mask (use symbolic macros)
4836 *
4837 * Return Value:
4838 *
4839 * None
4840 */
4841 void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4842 {
4843 /* output command to CCAR in bits <15..11> */
4844 /* preserve bits <10..7>, bits <6..0> must be zero */
4845
4846 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4847
4848 /* Read to flush write to CCAR */
4849 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4850 inw( info->io_base + CCAR );
4851
4852 } /* end of usc_RTCmd() */
4853
4854 /*
4855 * usc_DmaCmd()
4856 *
4857 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4858 *
4859 * Arguments:
4860 *
4861 * info pointer to device information structure
4862 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4863 *
4864 * Return Value:
4865 *
4866 * None
4867 */
4868 void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4869 {
4870 /* write command mask to DCAR */
4871 outw( Cmd + info->mbre_bit, info->io_base );
4872
4873 /* Read to flush write to DCAR */
4874 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4875 inw( info->io_base );
4876
4877 } /* end of usc_DmaCmd() */
4878
4879 /*
4880 * usc_OutDmaReg()
4881 *
4882 * Write a 16-bit value to a USC DMA register
4883 *
4884 * Arguments:
4885 *
4886 * info pointer to device info structure
4887 * RegAddr register address (number) for write
4888 * RegValue 16-bit value to write to register
4889 *
4890 * Return Value:
4891 *
4892 * None
4893 *
4894 */
4895 void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4896 {
4897 /* Note: The DCAR is located at the adapter base address */
4898 /* Note: must preserve state of BIT8 in DCAR */
4899
4900 outw( RegAddr + info->mbre_bit, info->io_base );
4901 outw( RegValue, info->io_base );
4902
4903 /* Read to flush write to DCAR */
4904 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4905 inw( info->io_base );
4906
4907 } /* end of usc_OutDmaReg() */
4908
4909 /*
4910 * usc_InDmaReg()
4911 *
4912 * Read a 16-bit value from a DMA register
4913 *
4914 * Arguments:
4915 *
4916 * info pointer to device info structure
4917 * RegAddr register address (number) to read from
4918 *
4919 * Return Value:
4920 *
4921 * The 16-bit value read from register
4922 *
4923 */
4924 u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4925 {
4926 /* Note: The DCAR is located at the adapter base address */
4927 /* Note: must preserve state of BIT8 in DCAR */
4928
4929 outw( RegAddr + info->mbre_bit, info->io_base );
4930 return inw( info->io_base );
4931
4932 } /* end of usc_InDmaReg() */
4933
4934 /*
4935 *
4936 * usc_OutReg()
4937 *
4938 * Write a 16-bit value to a USC serial channel register
4939 *
4940 * Arguments:
4941 *
4942 * info pointer to device info structure
4943 * RegAddr register address (number) to write to
4944 * RegValue 16-bit value to write to register
4945 *
4946 * Return Value:
4947 *
4948 * None
4949 *
4950 */
4951 void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4952 {
4953 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4954 outw( RegValue, info->io_base + CCAR );
4955
4956 /* Read to flush write to CCAR */
4957 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4958 inw( info->io_base + CCAR );
4959
4960 } /* end of usc_OutReg() */
4961
4962 /*
4963 * usc_InReg()
4964 *
4965 * Reads a 16-bit value from a USC serial channel register
4966 *
4967 * Arguments:
4968 *
4969 * info pointer to device extension
4970 * RegAddr register address (number) to read from
4971 *
4972 * Return Value:
4973 *
4974 * 16-bit value read from register
4975 */
4976 u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4977 {
4978 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4979 return inw( info->io_base + CCAR );
4980
4981 } /* end of usc_InReg() */
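/* Register access summary (descriptive): serial channel registers are
 * reached indirectly through CCAR (write the register number, then transfer
 * the data word), while DMA registers use DCAR at the adapter base address
 * in the same way. For example, usc_InReg(info, TCSR) performs
 *
 *	outw( TCSR + info->loopback_bits, info->io_base + CCAR );
 *	inw( info->io_base + CCAR );
 */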
4982
4983 /* usc_set_sdlc_mode()
4984 *
4985 * Set up the adapter for SDLC DMA communications.
4986 *
4987 * Arguments: info pointer to device instance data
4988 * Return Value: NONE
4989 */
4990 void usc_set_sdlc_mode( struct mgsl_struct *info )
4991 {
4992 u16 RegValue;
4993 int PreSL1660;
4994
4995 /*
4996 * determine if the IUSC on the adapter is pre-SL1660. If
4997 * not, take advantage of the UnderWait feature of more
4998 * modern chips. If an underrun occurs and this bit is set,
4999 * the transmitter will idle the programmed idle pattern
5000 * until the driver has time to service the underrun. Otherwise,
5001 * the dma controller may get the cycles previously requested
5002 * and begin transmitting queued tx data.
5003 */
5004 usc_OutReg(info,TMCR,0x1f);
5005 RegValue=usc_InReg(info,TMDR);
5006 if ( RegValue == IUSC_PRE_SL1660 )
5007 PreSL1660 = 1;
5008 else
5009 PreSL1660 = 0;
5010
5011
5012 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
5013 {
5014 /*
5015 ** Channel Mode Register (CMR)
5016 **
5017 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
5018 ** <13> 0 0 = Transmit Disabled (initially)
5019 ** <12> 0 1 = Consecutive Idles share common 0
5020 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
5021 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
5022 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
5023 **
5024 ** 1000 1110 0000 0110 = 0x8e06
5025 */
5026 RegValue = 0x8e06;
5027
5028 /*--------------------------------------------------
5029 * ignore user options for UnderRun Actions and
5030 * preambles
5031 *--------------------------------------------------*/
5032 }
5033 else
5034 {
5035 /* Channel mode Register (CMR)
5036 *
5037 * <15..14> 00 Tx Sub modes, Underrun Action
5038 * <13> 0 1 = Send Preamble before opening flag
5039 * <12> 0 1 = Consecutive Idles share common 0
5040 * <11..8> 0110 Transmitter mode = HDLC/SDLC
5041 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
5042 * <3..0> 0110 Receiver mode = HDLC/SDLC
5043 *
5044 * 0000 0110 0000 0110 = 0x0606
5045 */
5046 if (info->params.mode == MGSL_MODE_RAW) {
5047 RegValue = 0x0001; /* Set Receive mode = external sync */
5048
5049 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
5050 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
5051
5052 /*
5053 * TxSubMode:
5054 * CMR <15> 0 Don't send CRC on Tx Underrun
5055 * CMR <14> x undefined
5056 * CMR <13> 0 Send preamble before opening sync
5057 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
5058 *
5059 * TxMode:
5060 * CMR <11..8> 0100 MonoSync
5061 *
5062 * 0000 0100 xxxx xxxx = 0x04xx
5063 */
5064 RegValue |= 0x0400;
5065 }
5066 else {
5067
5068 RegValue = 0x0606;
5069
5070 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
5071 RegValue |= BIT14;
5072 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
5073 RegValue |= BIT15;
5074 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
5075 RegValue |= BIT15 + BIT14;
5076 }
5077
5078 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
5079 RegValue |= BIT13;
5080 }
5081
5082 if ( info->params.mode == MGSL_MODE_HDLC &&
5083 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
5084 RegValue |= BIT12;
5085
5086 if ( info->params.addr_filter != 0xff )
5087 {
5088 /* set up receive address filtering */
5089 usc_OutReg( info, RSR, info->params.addr_filter );
5090 RegValue |= BIT4;
5091 }
5092
5093 usc_OutReg( info, CMR, RegValue );
5094 info->cmr_value = RegValue;
5095
5096 /* Receiver mode Register (RMR)
5097 *
5098 * <15..13> 000 encoding
5099 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
5100 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
5101 * <9> 0 1 = Include Receive chars in CRC
5102 * <8> 1 1 = Use Abort/PE bit as abort indicator
5103 * <7..6> 00 Even parity
5104 * <5> 0 parity disabled
5105 * <4..2> 000 Receive Char Length = 8 bits
5106 * <1..0> 00 Disable Receiver
5107 *
5108 * 0000 0101 0000 0000 = 0x0500
5109 */
5110
5111 RegValue = 0x0500;
5112
5113 switch ( info->params.encoding ) {
5114 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
5115 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
5116 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
5117 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
5118 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
5119 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
5120 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
5121 }
5122
5123 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
5124 RegValue |= BIT9;
5125 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
5126 RegValue |= ( BIT12 | BIT10 | BIT9 );
5127
5128 usc_OutReg( info, RMR, RegValue );
5129
5130 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
5131 /* When an opening flag of an SDLC frame is recognized the */
5132 /* Receive Character count (RCC) is loaded with the value in */
5133 /* RCLR. The RCC is decremented for each received byte. The */
5134 /* value of RCC is stored after the closing flag of the frame */
5135 /* allowing the frame size to be computed. */
5136
5137 usc_OutReg( info, RCLR, RCLRVALUE );
5138
5139 usc_RCmd( info, RCmd_SelectRicrdma_level );
5140
5141 /* Receive Interrupt Control Register (RICR)
5142 *
5143 * <15..8> ? RxFIFO DMA Request Level
5144 * <7> 0 Exited Hunt IA (Interrupt Arm)
5145 * <6> 0 Idle Received IA
5146 * <5> 0 Break/Abort IA
5147 * <4> 0 Rx Bound IA
5148 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
5149 * <2> 0 Abort/PE IA
5150 * <1> 1 Rx Overrun IA
5151 * <0> 0 Select TC0 value for readback
5152 *
5153 * 0000 0000 0000 1010 = 0x000a
5154 */
5155
5156 /* Carry over the Exit Hunt and Idle Received bits */
5157 /* in case they have been armed by usc_ArmEvents. */
5158
5159 RegValue = usc_InReg( info, RICR ) & 0xc0;
5160
5161 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5162 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
5163 else
5164 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
5165
5166 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
5167
5168 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5169 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5170
5171 /* Transmit mode Register (TMR)
5172 *
5173 * <15..13> 000 encoding
5174 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
5175 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
5176 * <9> 0 1 = Tx CRC Enabled
5177 * <8> 0 1 = Append CRC to end of transmit frame
5178 * <7..6> 00 Transmit parity Even
5179 * <5> 0 Transmit parity Disabled
5180 * <4..2> 000 Tx Char Length = 8 bits
5181 * <1..0> 00 Disable Transmitter
5182 *
5183 * 0000 0100 0000 0000 = 0x0400
5184 */
5185
5186 RegValue = 0x0400;
5187
5188 switch ( info->params.encoding ) {
5189 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
5190 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
5191 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
5192 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
5193 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
5194 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
5195 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
5196 }
5197
5198 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
5199 RegValue |= BIT9 + BIT8;
5200 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
5201 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
5202
5203 usc_OutReg( info, TMR, RegValue );
5204
5205 usc_set_txidle( info );
5206
5207
5208 usc_TCmd( info, TCmd_SelectTicrdma_level );
5209
5210 /* Transmit Interrupt Control Register (TICR)
5211 *
5212 * <15..8> ? Transmit FIFO DMA Level
5213 * <7> 0 Present IA (Interrupt Arm)
5214 * <6> 0 Idle Sent IA
5215 * <5> 1 Abort Sent IA
5216 * <4> 1 EOF/EOM Sent IA
5217 * <3> 0 CRC Sent IA
5218 * <2> 1 1 = Wait for SW Trigger to Start Frame
5219 * <1> 1 Tx Underrun IA
5220 * <0> 0 TC0 constant on read back
5221 *
5222 * 0000 0000 0011 0110 = 0x0036
5223 */
5224
5225 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5226 usc_OutReg( info, TICR, 0x0736 );
5227 else
5228 usc_OutReg( info, TICR, 0x1436 );
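	/* As with RICR above, the upper TICR byte is the Transmit FIFO DMA
	 * Level field (<15..8>): 0x07 on PCI and 0x14 on ISA, combined with
	 * the 0x36 interrupt arm bits described above. */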
5229
5230 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5231 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5232
5233 /*
5234 ** Transmit Command/Status Register (TCSR)
5235 **
5236 ** <15..12> 0000 TCmd
5237 ** <11> 0/1 UnderWait
5238 ** <10..08> 000 TxIdle
5239 ** <7> x PreSent
5240 ** <6> x IdleSent
5241 ** <5> x AbortSent
5242 ** <4> x EOF/EOM Sent
5243 ** <3> x CRC Sent
5244 ** <2> x All Sent
5245 ** <1> x TxUnder
5246 ** <0> x TxEmpty
5247 **
5248 ** 0000 0000 0000 0000 = 0x0000
5249 */
5250 info->tcsr_value = 0;
5251
5252 if ( !PreSL1660 )
5253 info->tcsr_value |= TCSR_UNDERWAIT;
5254
5255 usc_OutReg( info, TCSR, info->tcsr_value );
5256
5257 /* Clock mode Control Register (CMCR)
5258 *
5259 * <15..14> 00 counter 1 Source = Disabled
5260 * <13..12> 00 counter 0 Source = Disabled
5261 * <11..10> 11 BRG1 Input is TxC Pin
5262 * <9..8> 11 BRG0 Input is TxC Pin
5263 * <7..6> 01 DPLL Input is BRG1 Output
5264 * <5..3> XXX TxCLK comes from Port 0
5265 * <2..0> XXX RxCLK comes from Port 1
5266 *
5267 * 0000 1111 0111 0111 = 0x0f77
5268 */
5269
5270 RegValue = 0x0f40;
5271
5272 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
5273 RegValue |= 0x0003; /* RxCLK from DPLL */
5274 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
5275 RegValue |= 0x0004; /* RxCLK from BRG0 */
5276 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
5277 RegValue |= 0x0006; /* RxCLK from TXC Input */
5278 else
5279 RegValue |= 0x0007; /* RxCLK from Port1 */
5280
5281 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
5282 RegValue |= 0x0018; /* TxCLK from DPLL */
5283 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
5284 RegValue |= 0x0020; /* TxCLK from BRG0 */
5285 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
5286 RegValue |= 0x0038; /* TxCLK from RXC Input */
5287 else
5288 RegValue |= 0x0030; /* TxCLK from Port0 */
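	/* Example: with none of the RXC/TXC clock flags set, the defaults
	 * above give RegValue = 0x0f40 | 0x0007 | 0x0030 = 0x0f77 (RxCLK
	 * from Port1, TxCLK from Port0), matching the sample value in the
	 * register description above. */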
5289
5290 usc_OutReg( info, CMCR, RegValue );
5291
5292
5293 /* Hardware Configuration Register (HCR)
5294 *
5295 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5296 * <13> 0 CTR1DSel:0=CTR0Div determines CTR1Div
5297 * <12> 0 CVOK:0=report code violation in biphase
5298 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5299 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5300 * <7..6> 00 reserved
5301 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5302 * <4> X BRG1 Enable
5303 * <3..2> 00 reserved
5304 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5305 * <0> 0 BRG0 Enable
5306 */
5307
5308 RegValue = 0x0000;
5309
5310 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5311 u32 XtalSpeed;
5312 u32 DpllDivisor;
5313 u16 Tc;
5314
5315 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5316 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5317
5318 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5319 XtalSpeed = 11059200;
5320 else
5321 XtalSpeed = 14745600;
5322
5323 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5324 DpllDivisor = 16;
5325 RegValue |= BIT10;
5326 }
5327 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5328 DpllDivisor = 8;
5329 RegValue |= BIT11;
5330 }
5331 else
5332 DpllDivisor = 32;
5333
5334 /* Tc = (Xtal/Speed) - 1 */
5335 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5336 /* then rounding up gives a more precise time constant. Instead */
5337 /* of rounding up and then subtracting 1 we just don't subtract */
5338 /* the one in this case. */
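	/* Worked example (assuming an ISA adapter, DPLL divisor 16 and a */
	/* 9600bps clock): 14745600/16 = 921600, 921600/9600 = 96 with no */
	/* remainder, so Tc = 96 - 1 = 95. When twice the remainder reaches */
	/* the clock speed the decrement is skipped, which rounds Tc up. */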
5339
5340 /*--------------------------------------------------
5341 * ejz: for DPLL mode, application should use the
5342 * same clock speed as the partner system, even
5343 * though clocking is derived from the input RxData.
5344 * In case the user uses a 0 for the clock speed,
5345 * default to 0xffff (Tc is a u16) and don't try to divide by
5346 * zero
5347 *--------------------------------------------------*/
5348 if ( info->params.clock_speed )
5349 {
5350 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5351 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5352 / info->params.clock_speed) )
5353 Tc--;
5354 }
5355 else
5356 Tc = -1;
5357
5358
5359 /* Write 16-bit Time Constant for BRG1 */
5360 usc_OutReg( info, TC1R, Tc );
5361
5362 RegValue |= BIT4; /* enable BRG1 */
5363
5364 switch ( info->params.encoding ) {
5365 case HDLC_ENCODING_NRZ:
5366 case HDLC_ENCODING_NRZB:
5367 case HDLC_ENCODING_NRZI_MARK:
5368 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5369 case HDLC_ENCODING_BIPHASE_MARK:
5370 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5371 case HDLC_ENCODING_BIPHASE_LEVEL:
5372 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5373 }
5374 }
5375
5376 usc_OutReg( info, HCR, RegValue );
5377
5378
5379 /* Channel Control/status Register (CCSR)
5380 *
5381 * <15> X RCC FIFO Overflow status (RO)
5382 * <14> X RCC FIFO Not Empty status (RO)
5383 * <13> 0 1 = Clear RCC FIFO (WO)
5384 * <12> X DPLL Sync (RW)
5385 * <11> X DPLL 2 Missed Clocks status (RO)
5386 * <10> X DPLL 1 Missed Clock status (RO)
5387 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5388 * <7> X SDLC Loop On status (RO)
5389 * <6> X SDLC Loop Send status (RO)
5390 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5391 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5392 * <1..0> 00 reserved
5393 *
5394 * 0000 0000 0010 0000 = 0x0020
5395 */
5396
5397 usc_OutReg( info, CCSR, 0x1020 );
5398
5399
5400 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5401 usc_OutReg( info, SICR,
5402 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5403 }
5404
5405
5406 /* enable Master Interrupt Enable bit (MIE) */
5407 usc_EnableMasterIrqBit( info );
5408
5409 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5410 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5411
5412 /* arm RCC underflow interrupt */
5413 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5414 usc_EnableInterrupts(info, MISC);
5415
5416 info->mbre_bit = 0;
5417 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5418 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5419 info->mbre_bit = BIT8;
5420 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5421
5422 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5423 /* Enable DMAEN (Port 7, Bit 14) */
5424 /* This connects the DMA request signal to the ISA bus */
5425 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5426 }
5427
5428 /* DMA Control Register (DCR)
5429 *
5430 * <15..14> 10 Priority mode = Alternating Tx/Rx
5431 * 01 Rx has priority
5432 * 00 Tx has priority
5433 *
5434 * <13> 1 Enable Priority Preempt per DCR<15..14>
5435 * (WARNING DCR<11..10> must be 00 when this is 1)
5436 * 0 Choose activate channel per DCR<11..10>
5437 *
5438 * <12> 0 Little Endian for Array/List
5439 * <11..10> 00 Both Channels can use each bus grant
5440 * <9..6> 0000 reserved
5441 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5442 * <4> 0 1 = drive D/C and S/D pins
5443 * <3> 1 1 = Add one wait state to all DMA cycles.
5444 * <2> 0 1 = Strobe /UAS on every transfer.
5445 * <1..0> 11 Addr incrementing only affects LS24 bits
5446 *
5447 * 1010 0000 0000 1011 = 0xa00b
5448 */
5449
5450 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5451 /* PCI adapter does not need DMA wait state */
5452 usc_OutDmaReg( info, DCR, 0xa00b );
5453 }
5454 else
5455 usc_OutDmaReg( info, DCR, 0x800b );
5456
5457
5458 /* Receive DMA mode Register (RDMR)
5459 *
5460 * <15..14> 11 DMA mode = Linked List Buffer mode
5461 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5462 * <12> 1 Clear count of List Entry after fetching
5463 * <11..10> 00 Address mode = Increment
5464 * <9> 1 Terminate Buffer on RxBound
5465 * <8> 0 Bus Width = 16bits
5466 * <7..0> ? status Bits (write as 0s)
5467 *
5468 * 1111 0010 0000 0000 = 0xf200
5469 */
5470
5471 usc_OutDmaReg( info, RDMR, 0xf200 );
5472
5473
5474 /* Transmit DMA mode Register (TDMR)
5475 *
5476 * <15..14> 11 DMA mode = Linked List Buffer mode
5477 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5478 * <12> 1 Clear count of List Entry after fetching
5479 * <11..10> 00 Address mode = Increment
5480 * <9> 1 Terminate Buffer on end of frame
5481 * <8> 0 Bus Width = 16bits
5482 * <7..0> ? status Bits (Read Only so write as 0)
5483 *
5484 * 1111 0010 0000 0000 = 0xf200
5485 */
5486
5487 usc_OutDmaReg( info, TDMR, 0xf200 );
5488
5489
5490 /* DMA Interrupt Control Register (DICR)
5491 *
5492 * <15> 1 DMA Interrupt Enable
5493 * <14> 0 1 = Disable IEO from USC
5494 * <13> 0 1 = Don't provide vector during IntAck
5495 * <12> 1 1 = Include status in Vector
5496 * <10..2> 0 reserved, Must be 0s
5497 * <1> 0 1 = Rx DMA Interrupt Enabled
5498 * <0> 0 1 = Tx DMA Interrupt Enabled
5499 *
5500 * 1001 0000 0000 0000 = 0x9000
5501 */
5502
5503 usc_OutDmaReg( info, DICR, 0x9000 );
5504
5505 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5506 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5507 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5508
5509 /* Channel Control Register (CCR)
5510 *
5511 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5512 * <13> 0 Trigger Tx on SW Command Disabled
5513 * <12> 0 Flag Preamble Disabled
5514 * <11..10> 00 Preamble Length
5515 * <9..8> 00 Preamble Pattern
5516 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5517 * <5> 0 Trigger Rx on SW Command Disabled
5518 * <4..0> 0 reserved
5519 *
5520 * 1000 0000 1000 0000 = 0x8080
5521 */
5522
5523 RegValue = 0x8080;
5524
5525 switch ( info->params.preamble_length ) {
5526 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5527 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5528 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5529 }
5530
5531 switch ( info->params.preamble ) {
5532 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5533 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5534 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5535 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5536 }
5537
5538 usc_OutReg( info, CCR, RegValue );
5539
5540
5541 /*
5542 * Burst/Dwell Control Register
5543 *
5544 * <15..8> 0x20 Maximum number of transfers per bus grant
5545 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5546 */
5547
5548 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5549 /* don't limit bus occupancy on PCI adapter */
5550 usc_OutDmaReg( info, BDCR, 0x0000 );
5551 }
5552 else
5553 usc_OutDmaReg( info, BDCR, 0x2000 );
5554
5555 usc_stop_transmitter(info);
5556 usc_stop_receiver(info);
5557
5558 } /* end of usc_set_sdlc_mode() */
5559
5560 /* usc_enable_loopback()
5561 *
5562 * Set the 16C32 for internal loopback mode.
5563 * The TxCLK and RxCLK signals are generated from the BRG0 and
5564 * the TxD is looped back to the RxD internally.
5565 *
5566 * Arguments: info pointer to device instance data
5567 * enable 1 = enable loopback, 0 = disable
5568 * Return Value: None
5569 */
5570 void usc_enable_loopback( struct mgsl_struct *info, int enable )
5571 {
5572 if (enable) {
5573 /* blank external TXD output */
5574 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5575
5576 /* Clock mode Control Register (CMCR)
5577 *
5578 * <15..14> 00 counter 1 Disabled
5579 * <13..12> 00 counter 0 Disabled
5580 * <11..10> 11 BRG1 Input is TxC Pin
5581 * <9..8> 11 BRG0 Input is TxC Pin
5582 * <7..6> 01 DPLL Input is BRG1 Output
5583 * <5..3> 100 TxCLK comes from BRG0
5584 * <2..0> 100 RxCLK comes from BRG0
5585 *
5586 * 0000 1111 0110 0100 = 0x0f64
5587 */
5588
5589 usc_OutReg( info, CMCR, 0x0f64 );
5590
5591 /* Write 16-bit Time Constant for BRG0 */
5592 /* use clock speed if available, otherwise use 8 for diagnostics */
5593 if (info->params.clock_speed) {
5594 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5595 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5596 else
5597 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5598 } else
5599 usc_OutReg(info, TC0R, (u16)8);
5600
5601 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5602 mode = Continuous Set Bit 0 to enable BRG0. */
5603 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5604
5605 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5606 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5607
5608 /* set Internal Data loopback mode */
5609 info->loopback_bits = 0x300;
5610 outw( 0x0300, info->io_base + CCAR );
5611 } else {
5612 /* enable external TXD output */
5613 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5614
5615 /* clear Internal Data loopback mode */
5616 info->loopback_bits = 0;
5617 outw( 0,info->io_base + CCAR );
5618 }
5619
5620 } /* end of usc_enable_loopback() */
5621
5622 /* usc_enable_aux_clock()
5623 *
5624 * Enables the AUX clock output at the specified frequency.
5625 *
5626 * Arguments:
5627 *
5628 * info pointer to device extension
5629 * data_rate data rate of clock in bits per second
5630 * A data rate of 0 disables the AUX clock.
5631 *
5632 * Return Value: None
5633 */
5634 void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5635 {
5636 u32 XtalSpeed;
5637 u16 Tc;
5638
5639 if ( data_rate ) {
5640 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5641 XtalSpeed = 11059200;
5642 else
5643 XtalSpeed = 14745600;
5644
5645
5646 /* Tc = (Xtal/Speed) - 1 */
5647 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5648 /* then rounding up gives a more precise time constant. Instead */
5649 /* of rounding up and then subtracting 1 we just don't subtract */
5650 /* the one in this case. */
5651
5652
5653 Tc = (u16)(XtalSpeed/data_rate);
5654 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5655 Tc--;
5656
5657 /* Write 16-bit Time Constant for BRG0 */
5658 usc_OutReg( info, TC0R, Tc );
5659
5660 /*
5661 * Hardware Configuration Register (HCR)
5662 * Clear Bit 1, BRG0 mode = Continuous
5663 * Set Bit 0 to enable BRG0.
5664 */
5665
5666 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5667
5668 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5669 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5670 } else {
5671 /* data rate == 0 so turn off BRG0 */
5672 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5673 }
5674
5675 } /* end of usc_enable_aux_clock() */
5676
5677 /*
5678 *
5679 * usc_process_rxoverrun_sync()
5680 *
5681 * This function processes a receive overrun by resetting the
5682 * receive DMA buffers and issuing a Purge Rx FIFO command
5683 * to allow the receiver to continue receiving.
5684 *
5685 * Arguments:
5686 *
5687 * info pointer to device extension
5688 *
5689 * Return Value: None
5690 */
5691 void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5692 {
5693 int start_index;
5694 int end_index;
5695 int frame_start_index;
5696 int start_of_frame_found = FALSE;
5697 int end_of_frame_found = FALSE;
5698 int reprogram_dma = FALSE;
5699
5700 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5701 u32 phys_addr;
5702
5703 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5704 usc_RCmd( info, RCmd_EnterHuntmode );
5705 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5706
5707 /* CurrentRxBuffer points to the 1st buffer of the next */
5708 /* possibly available receive frame. */
5709
5710 frame_start_index = start_index = end_index = info->current_rx_buffer;
5711
5712 /* Search for an unfinished string of buffers. This means */
5713 /* that a receive frame started (at least one buffer with */
5714 /* count set to zero) but there is no terminating buffer */
5715 /* (status set to non-zero). */
5716
5717 while( !buffer_list[end_index].count )
5718 {
5719 /* Count field has been reset to zero by 16C32. */
5720 /* This buffer is currently in use. */
5721
5722 if ( !start_of_frame_found )
5723 {
5724 start_of_frame_found = TRUE;
5725 frame_start_index = end_index;
5726 end_of_frame_found = FALSE;
5727 }
5728
5729 if ( buffer_list[end_index].status )
5730 {
5731 /* Status field has been set by 16C32. */
5732 /* This is the last buffer of a received frame. */
5733
5734 /* We want to leave the buffers for this frame intact. */
5735 /* Move on to next possible frame. */
5736
5737 start_of_frame_found = FALSE;
5738 end_of_frame_found = TRUE;
5739 }
5740
5741 /* advance to next buffer entry in linked list */
5742 end_index++;
5743 if ( end_index == info->rx_buffer_count )
5744 end_index = 0;
5745
5746 if ( start_index == end_index )
5747 {
5748 /* The entire list has been searched with all Counts == 0 and */
5749 /* all Status == 0. The receive buffers are */
5750 /* completely screwed, reset all receive buffers! */
5751 mgsl_reset_rx_dma_buffers( info );
5752 frame_start_index = 0;
5753 start_of_frame_found = FALSE;
5754 reprogram_dma = TRUE;
5755 break;
5756 }
5757 }
5758
5759 if ( start_of_frame_found && !end_of_frame_found )
5760 {
5761 /* There is an unfinished string of receive DMA buffers */
5762 /* as a result of the receiver overrun. */
5763
5764 /* Reset the buffers for the unfinished frame */
5765 /* and reprogram the receive DMA controller to start */
5766 /* at the 1st buffer of unfinished frame. */
5767
5768 start_index = frame_start_index;
5769
5770 do
5771 {
5772 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5773
5774 /* Adjust index for wrap around. */
5775 if ( start_index == info->rx_buffer_count )
5776 start_index = 0;
5777
5778 } while( start_index != end_index );
5779
5780 reprogram_dma = TRUE;
5781 }
5782
5783 if ( reprogram_dma )
5784 {
5785 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5786 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5787 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5788
5789 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5790
5791 /* This empties the receive FIFO and loads the RCC with RCLR */
5792 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5793
5794 /* program 16C32 with physical address of 1st DMA buffer entry */
5795 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5796 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5797 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5798
5799 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5800 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5801 usc_EnableInterrupts( info, RECEIVE_STATUS );
5802
5803 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5804 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5805
5806 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5807 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5808 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5809 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5810 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5811 else
5812 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5813 }
5814 else
5815 {
5816 /* This empties the receive FIFO and loads the RCC with RCLR */
5817 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5818 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5819 }
5820
5821 } /* end of usc_process_rxoverrun_sync() */
5822
5823 /* usc_stop_receiver()
5824 *
5825 * Disable USC receiver
5826 *
5827 * Arguments: info pointer to device instance data
5828 * Return Value: None
5829 */
5830 void usc_stop_receiver( struct mgsl_struct *info )
5831 {
5832 if (debug_level >= DEBUG_LEVEL_ISR)
5833 printk("%s(%d):usc_stop_receiver(%s)\n",
5834 __FILE__,__LINE__, info->device_name );
5835
5836 /* Disable receive DMA channel. */
5837 /* This also disables receive DMA channel interrupts */
5838 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5839
5840 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5841 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5842 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5843
5844 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5845
5846 /* This empties the receive FIFO and loads the RCC with RCLR */
5847 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5848 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5849
5850 info->rx_enabled = 0;
5851 info->rx_overflow = 0;
5852 info->rx_rcc_underrun = 0;
5853
5854 } /* end of usc_stop_receiver() */
5855
5856 /* usc_start_receiver()
5857 *
5858 * Enable the USC receiver
5859 *
5860 * Arguments: info pointer to device instance data
5861 * Return Value: None
5862 */
5863 void usc_start_receiver( struct mgsl_struct *info )
5864 {
5865 u32 phys_addr;
5866
5867 if (debug_level >= DEBUG_LEVEL_ISR)
5868 printk("%s(%d):usc_start_receiver(%s)\n",
5869 __FILE__,__LINE__, info->device_name );
5870
5871 mgsl_reset_rx_dma_buffers( info );
5872 usc_stop_receiver( info );
5873
5874 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5875 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5876
5877 if ( info->params.mode == MGSL_MODE_HDLC ||
5878 info->params.mode == MGSL_MODE_RAW ) {
5879 /* DMA mode Transfers */
5880 /* Program the DMA controller. */
5881 /* Enable the DMA controller end of buffer interrupt. */
5882
5883 /* program 16C32 with physical address of 1st DMA buffer entry */
5884 phys_addr = info->rx_buffer_list[0].phys_entry;
5885 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5886 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5887
5888 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5889 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5890 usc_EnableInterrupts( info, RECEIVE_STATUS );
5891
5892 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5893 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5894
5895 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5896 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5897 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5898 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5899 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5900 else
5901 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5902 } else {
5903 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5904 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5905 usc_EnableInterrupts(info, RECEIVE_DATA);
5906
5907 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5908 usc_RCmd( info, RCmd_EnterHuntmode );
5909
5910 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5911 }
5912
5913 usc_OutReg( info, CCSR, 0x1020 );
5914
5915 info->rx_enabled = 1;
5916
5917 } /* end of usc_start_receiver() */
5918
5919 /* usc_start_transmitter()
5920 *
5921 * Enable the USC transmitter and send a transmit frame if
5922 * one is loaded in the DMA buffers.
5923 *
5924 * Arguments: info pointer to device instance data
5925 * Return Value: None
5926 */
5927 void usc_start_transmitter( struct mgsl_struct *info )
5928 {
5929 u32 phys_addr;
5930 unsigned int FrameSize;
5931
5932 if (debug_level >= DEBUG_LEVEL_ISR)
5933 printk("%s(%d):usc_start_transmitter(%s)\n",
5934 __FILE__,__LINE__, info->device_name );
5935
5936 if ( info->xmit_cnt ) {
5937
5938 /* If auto RTS enabled and RTS is inactive, then assert */
5939 /* RTS and set a flag indicating that the driver should */
5940 /* negate RTS when the transmission completes. */
5941
5942 info->drop_rts_on_tx_done = 0;
5943
5944 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5945 usc_get_serial_signals( info );
5946 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5947 info->serial_signals |= SerialSignal_RTS;
5948 usc_set_serial_signals( info );
5949 info->drop_rts_on_tx_done = 1;
5950 }
5951 }
5952
5953
5954 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5955 if ( !info->tx_active ) {
5956 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5957 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5958 usc_EnableInterrupts(info, TRANSMIT_DATA);
5959 usc_load_txfifo(info);
5960 }
5961 } else {
5962 /* Disable transmit DMA controller while programming. */
5963 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5964
5965 /* Transmit DMA buffer is loaded, so program USC */
5966 /* to send the frame contained in the buffers. */
5967
5968 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5969
5970 /* if operating in Raw sync mode, reset the rcc component
5971 * of the tx dma buffer entry, otherwise, the serial controller
5972 * will send a closing sync char after this count.
5973 */
5974 if ( info->params.mode == MGSL_MODE_RAW )
5975 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5976
5977 /* Program the Transmit Character Length Register (TCLR) */
5978 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5979 usc_OutReg( info, TCLR, (u16)FrameSize );
5980
5981 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5982
5983 /* Program the address of the 1st DMA Buffer Entry in linked list */
5984 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5985 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5986 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5987
5988 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5989 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5990 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5991
5992 if ( info->params.mode == MGSL_MODE_RAW &&
5993 info->num_tx_dma_buffers > 1 ) {
5994 /* When running external sync mode, attempt to 'stream' transmit */
5995 /* by filling tx dma buffers as they become available. To do this */
5996 /* we need to enable Tx DMA EOB Status interrupts : */
5997 /* */
5998 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5999 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
6000
6001 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
6002 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
6003 }
6004
6005 /* Initialize Transmit DMA Channel */
6006 usc_DmaCmd( info, DmaCmd_InitTxChannel );
6007
6008 usc_TCmd( info, TCmd_SendFrame );
6009
6010 info->tx_timer.expires = jiffies + jiffies_from_ms(5000);
6011 add_timer(&info->tx_timer);
6012 }
6013 info->tx_active = 1;
6014 }
6015
6016 if ( !info->tx_enabled ) {
6017 info->tx_enabled = 1;
6018 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
6019 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
6020 else
6021 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6022 }
6023
6024 } /* end of usc_start_transmitter() */
6025
6026 /* usc_stop_transmitter()
6027 *
6028 * Stops the transmitter and DMA
6029 *
6030 * Arguments: info pointer to device instance data
6031 * Return Value: None
6032 */
6033 void usc_stop_transmitter( struct mgsl_struct *info )
6034 {
6035 if (debug_level >= DEBUG_LEVEL_ISR)
6036 printk("%s(%d):usc_stop_transmitter(%s)\n",
6037 __FILE__,__LINE__, info->device_name );
6038
6039 del_timer(&info->tx_timer);
6040
6041 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6042 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
6043 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
6044
6045 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
6046 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
6047 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6048
6049 info->tx_enabled = 0;
6050 info->tx_active = 0;
6051
6052 } /* end of usc_stop_transmitter() */
6053
6054 /* usc_load_txfifo()
6055 *
6056 * Fill the transmit FIFO until the FIFO is full or
6057 * there is no more data to load.
6058 *
6059 * Arguments: info pointer to device extension (instance data)
6060 * Return Value: None
6061 */
6062 void usc_load_txfifo( struct mgsl_struct *info )
6063 {
6064 int Fifocount;
6065 u8 TwoBytes[2];
6066
6067 if ( !info->xmit_cnt && !info->x_char )
6068 return;
6069
6070 /* Select transmit FIFO status readback in TICR */
6071 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
6072
6073 /* load the Transmit FIFO until FIFOs full or all data sent */
6074
6075 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
6076 /* there is more space in the transmit FIFO and */
6077 /* there is more data in transmit buffer */
6078
6079 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
6080 /* write a 16-bit word from transmit buffer to 16C32 */
6081
6082 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
6083 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
6084 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
6085 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
6086
6087 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
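			/* TwoBytes[] is written as one 16-bit word so that two
			 * queued bytes go out in a single I/O cycle; on a
			 * little-endian host TwoBytes[0] is the low byte, which
			 * matches the little-endian ordering selected in
			 * usc_reset(), so the earlier buffer byte is sent first. */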
6088
6089 info->xmit_cnt -= 2;
6090 info->icount.tx += 2;
6091 } else {
6092 /* only 1 byte left to transmit or 1 FIFO slot left */
6093
6094 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
6095 info->io_base + CCAR );
6096
6097 if (info->x_char) {
6098 /* transmit pending high priority char */
6099 outw( info->x_char,info->io_base + CCAR );
6100 info->x_char = 0;
6101 } else {
6102 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
6103 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
6104 info->xmit_cnt--;
6105 }
6106 info->icount.tx++;
6107 }
6108 }
6109
6110 } /* end of usc_load_txfifo() */
6111
6112 /* usc_reset()
6113 *
6114 * Reset the adapter to a known state and prepare it for further use.
6115 *
6116 * Arguments: info pointer to device instance data
6117 * Return Value: None
6118 */
6119 void usc_reset( struct mgsl_struct *info )
6120 {
6121 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
6122 int i;
6123 u32 readval;
6124
6125 /* Set BIT30 of Misc Control Register */
6126 /* (Local Control Register 0x50) to force reset of USC. */
6127
6128 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
6129 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
6130
6131 info->misc_ctrl_value |= BIT30;
6132 *MiscCtrl = info->misc_ctrl_value;
6133
6134 /*
6135 * Force at least 170ns delay before clearing
6136 * reset bit. Each read from LCR takes at least
6137 * 30ns, so reading it 10 times gives at least 300ns to be safe.
6138 */
6139 for(i=0;i<10;i++)
6140 readval = *MiscCtrl;
6141
6142 info->misc_ctrl_value &= ~BIT30;
6143 *MiscCtrl = info->misc_ctrl_value;
6144
6145 *LCR0BRDR = BUS_DESCRIPTOR(
6146 1, // Write Strobe Hold (0-3)
6147 2, // Write Strobe Delay (0-3)
6148 2, // Read Strobe Delay (0-3)
6149 0, // NWDD (Write data-data) (0-3)
6150 4, // NWAD (Write Addr-data) (0-31)
6151 0, // NXDA (Read/Write Data-Addr) (0-3)
6152 0, // NRDD (Read Data-Data) (0-3)
6153 5 // NRAD (Read Addr-Data) (0-31)
6154 );
6155 } else {
6156 /* do HW reset */
6157 outb( 0,info->io_base + 8 );
6158 }
6159
6160 info->mbre_bit = 0;
6161 info->loopback_bits = 0;
6162 info->usc_idle_mode = 0;
6163
6164 /*
6165 * Program the Bus Configuration Register (BCR)
6166 *
6167 * <15> 0 Don't use separate address
6168 * <14..6> 0 reserved
6169 * <5..4> 00 IAckmode = Default, don't care
6170 * <3> 1 Bus Request Totem Pole output
6171 * <2> 1 Use 16 Bit data bus
6172 * <1> 0 IRQ Totem Pole output
6173 * <0> 0 Don't Shift Right Addr
6174 *
6175 * 0000 0000 0000 1100 = 0x000c
6176 *
6177 * By writing to io_base + SDPIN the Wait/Ack pin is
6178 * programmed to work as a Wait pin.
6179 */
6180
6181 outw( 0x000c,info->io_base + SDPIN );
6182
6183
6184 outw( 0,info->io_base );
6185 outw( 0,info->io_base + CCAR );
6186
6187 /* select little endian byte ordering */
6188 usc_RTCmd( info, RTCmd_SelectLittleEndian );
6189
6190
6191 /* Port Control Register (PCR)
6192 *
6193 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
6194 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
6195 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
6196 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
6197 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
6198 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
6199 * <3..2> 01 Port 1 is Input (Dedicated RxC)
6200 * <1..0> 01 Port 0 is Input (Dedicated TxC)
6201 *
6202 * 1111 0000 1111 0101 = 0xf0f5
6203 */
6204
6205 usc_OutReg( info, PCR, 0xf0f5 );
6206
6207
6208 /*
6209 * Input/Output Control Register
6210 *
6211 * <15..14> 00 CTS is active low input
6212 * <13..12> 00 DCD is active low input
6213 * <11..10> 00 TxREQ pin is input (DSR)
6214 * <9..8> 00 RxREQ pin is input (RI)
6215 * <7..6> 00 TxD is output (Transmit Data)
6216 * <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
6217 * <2..0> 100 RxC is Output (drive with BRG0)
6218 *
6219 * 0000 0000 0000 0100 = 0x0004
6220 */
6221
6222 usc_OutReg( info, IOCR, 0x0004 );
6223
6224 } /* end of usc_reset() */
6225
6226 /* usc_set_async_mode()
6227 *
6228 * Program adapter for asynchronous communications.
6229 *
6230 * Arguments: info pointer to device instance data
6231 * Return Value: None
6232 */
6233 void usc_set_async_mode( struct mgsl_struct *info )
6234 {
6235 u16 RegValue;
6236
6237 /* disable interrupts while programming USC */
6238 usc_DisableMasterIrqBit( info );
6239
6240 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
6241 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
6242
6243 usc_loopback_frame( info );
6244
6245 /* Channel mode Register (CMR)
6246 *
6247 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
6248 * <13..12> 00 00 = 16X Clock
6249 * <11..8> 0000 Transmitter mode = Asynchronous
6250 * <7..6> 00 reserved?
6251 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
6252 * <3..0> 0000 Receiver mode = Asynchronous
6253 *
6254 * 0000 0000 0000 0000 = 0x0
6255 */
6256
6257 RegValue = 0;
6258 if ( info->params.stop_bits != 1 )
6259 RegValue |= BIT14;
6260 usc_OutReg( info, CMR, RegValue );
6261
6262
6263 /* Receiver mode Register (RMR)
6264 *
6265 * <15..13> 000 encoding = None
6266 * <12..08> 00000 reserved (Sync Only)
6267 * <7..6> 00 Even parity
6268 * <5> 0 parity disabled
6269 * <4..2> 000 Receive Char Length = 8 bits
6270 * <1..0> 00 Disable Receiver
6271 *
6272 * 0000 0000 0000 0000 = 0x0
6273 */
6274
6275 RegValue = 0;
6276
6277 if ( info->params.data_bits != 8 )
6278 RegValue |= BIT4+BIT3+BIT2;
6279
6280 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6281 RegValue |= BIT5;
6282 if ( info->params.parity != ASYNC_PARITY_ODD )
6283 RegValue |= BIT6;
6284 }
6285
6286 usc_OutReg( info, RMR, RegValue );
6287
6288
6289 /* Set IRQ trigger level */
6290
6291 usc_RCmd( info, RCmd_SelectRicrIntLevel );
6292
6293
6294 /* Receive Interrupt Control Register (RICR)
6295 *
6296 * <15..8> ? RxFIFO IRQ Request Level
6297 *
6298 * Note: For async mode the receive FIFO level must be set
6299 * to 0 to avoid the situation where the FIFO contains fewer bytes
6300 * than the trigger level and no more data is expected.
6301 *
6302 * <7> 0 Exited Hunt IA (Interrupt Arm)
6303 * <6> 0 Idle Received IA
6304 * <5> 0 Break/Abort IA
6305 * <4> 0 Rx Bound IA
6306 * <3> 0 Queued status reflects oldest byte in FIFO
6307 * <2> 0 Abort/PE IA
6308 * <1> 0 Rx Overrun IA
6309 * <0> 0 Select TC0 value for readback
6310 *
6311 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6312 */
6313
6314 usc_OutReg( info, RICR, 0x0000 );
6315
6316 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6317 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6318
6319
6320 /* Transmit mode Register (TMR)
6321 *
6322 * <15..13> 000 encoding = None
6323 * <12..08> 00000 reserved (Sync Only)
6324 * <7..6> 00 Transmit parity Even
6325 * <5> 0 Transmit parity Disabled
6326 * <4..2> 000 Tx Char Length = 8 bits
6327 * <1..0> 00 Disable Transmitter
6328 *
6329 * 0000 0000 0000 0000 = 0x0
6330 */
6331
6332 RegValue = 0;
6333
6334 if ( info->params.data_bits != 8 )
6335 RegValue |= BIT4+BIT3+BIT2;
6336
6337 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6338 RegValue |= BIT5;
6339 if ( info->params.parity != ASYNC_PARITY_ODD )
6340 RegValue |= BIT6;
6341 }
6342
6343 usc_OutReg( info, TMR, RegValue );
6344
6345 usc_set_txidle( info );
6346
6347
6348 /* Set IRQ trigger level */
6349
6350 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6351
6352
6353 /* Transmit Interrupt Control Register (TICR)
6354 *
6355 * <15..8> ? Transmit FIFO IRQ Level
6356 * <7> 0 Present IA (Interrupt Arm)
6357 * <6> 1 Idle Sent IA
6358 * <5> 0 Abort Sent IA
6359 * <4> 0 EOF/EOM Sent IA
6360 * <3> 0 CRC Sent IA
6361 * <2> 0 1 = Wait for SW Trigger to Start Frame
6362 * <1> 0 Tx Underrun IA
6363 * <0> 0 TC0 constant on read back
6364 *
6365 * 0000 0000 0100 0000 = 0x0040
6366 */
6367
6368 usc_OutReg( info, TICR, 0x1f40 );
6369
6370 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6371 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6372
6373 usc_enable_async_clock( info, info->params.data_rate );
6374
6375
6376 /* Channel Control/status Register (CCSR)
6377 *
6378 * <15> X RCC FIFO Overflow status (RO)
6379 * <14> X RCC FIFO Not Empty status (RO)
6380 * <13> 0 1 = Clear RCC FIFO (WO)
6381 * <12> X DPLL in Sync status (RO)
6382 * <11> X DPLL 2 Missed Clocks status (RO)
6383 * <10> X DPLL 1 Missed Clock status (RO)
6384 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6385 * <7> X SDLC Loop On status (RO)
6386 * <6> X SDLC Loop Send status (RO)
6387 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6388 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6389 * <1..0> 00 reserved
6390 *
6391 * 0000 0000 0010 0000 = 0x0020
6392 */
6393
6394 usc_OutReg( info, CCSR, 0x0020 );
6395
6396 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6397 RECEIVE_DATA + RECEIVE_STATUS );
6398
6399 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6400 RECEIVE_DATA + RECEIVE_STATUS );
6401
6402 usc_EnableMasterIrqBit( info );
6403
6404 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6405 /* Enable INTEN (Port 6, Bit12) */
6406 /* This connects the IRQ request signal to the ISA bus */
6407 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6408 }
6409
6410 } /* end of usc_set_async_mode() */
6411
6412 /* usc_loopback_frame()
6413 *
6414 * Loop back a small (2 byte) dummy SDLC frame.
6415 * Interrupts and DMA are NOT used. The purpose of this is to
6416 * clear any 'stale' status info left over from running in async mode.
6417 *
6418 * The 16C32 shows the strange behaviour of marking the 1st
6419 * received SDLC frame with a CRC error even when there is no
6420 * CRC error. To get around this a small dummy frame of 2 bytes
6421 * is looped back when switching from async to sync mode.
6422 *
6423 * Arguments: info pointer to device instance data
6424 * Return Value: None
6425 */
6426 void usc_loopback_frame( struct mgsl_struct *info )
6427 {
6428 int i;
6429 unsigned long oldmode = info->params.mode;
6430
6431 info->params.mode = MGSL_MODE_HDLC;
6432
6433 usc_DisableMasterIrqBit( info );
6434
6435 usc_set_sdlc_mode( info );
6436 usc_enable_loopback( info, 1 );
6437
6438 /* Write 16-bit Time Constant for BRG0 */
6439 usc_OutReg( info, TC0R, 0 );
6440
6441 /* Channel Control Register (CCR)
6442 *
6443 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6444 * <13> 0 Trigger Tx on SW Command Disabled
6445 * <12> 0 Flag Preamble Disabled
6446 * <11..10> 00 Preamble Length = 8-Bits
6447 * <9..8> 01 Preamble Pattern = flags
6448 * <7..6> 00 Don't use 32-bit Rx status Blocks (RSBs)
6449 * <5> 0 Trigger Rx on SW Command Disabled
6450 * <4..0> 0 reserved
6451 *
6452 * 0000 0001 0000 0000 = 0x0100
6453 */
6454
6455 usc_OutReg( info, CCR, 0x0100 );
6456
6457 /* SETUP RECEIVER */
6458 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6459 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6460
6461 /* SETUP TRANSMITTER */
6462 /* Program the Transmit Character Length Register (TCLR) */
6463 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6464 usc_OutReg( info, TCLR, 2 );
6465 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6466
6467 /* unlatch Tx status bits, and start transmit channel. */
6468 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6469 outw(0,info->io_base + DATAREG);
6470
6471 /* ENABLE TRANSMITTER */
6472 usc_TCmd( info, TCmd_SendFrame );
6473 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6474
6475 /* WAIT FOR RECEIVE COMPLETE */
6476 for (i=0 ; i<1000 ; i++)
6477 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6478 break;
6479
6480 /* clear Internal Data loopback mode */
6481 usc_enable_loopback(info, 0);
6482
6483 usc_EnableMasterIrqBit(info);
6484
6485 info->params.mode = oldmode;
6486
6487 } /* end of usc_loopback_frame() */
6488
6489 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6490 *
6491 * Arguments: info pointer to adapter info structure
6492 * Return Value: None
6493 */
6494 void usc_set_sync_mode( struct mgsl_struct *info )
6495 {
6496 usc_loopback_frame( info );
6497 usc_set_sdlc_mode( info );
6498
6499 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6500 /* Enable INTEN (Port 6, Bit12) */
6501 /* This connects the IRQ request signal to the ISA bus */
6502 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6503 }
6504
6505 usc_enable_aux_clock(info, info->params.clock_speed);
6506
6507 if (info->params.loopback)
6508 usc_enable_loopback(info,1);
6509
6510 } /* end of usc_set_sync_mode() */
6511
6512 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6513 *
6514 * Arguments: info pointer to device instance data
6515 * Return Value: None
6516 */
6517 void usc_set_txidle( struct mgsl_struct *info )
6518 {
6519 u16 usc_idle_mode = IDLEMODE_FLAGS;
6520
6521 /* Map API idle mode to USC register bits */
6522
6523 switch( info->idle_mode ){
6524 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6525 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6526 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6527 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6528 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6529 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6530 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6531 }
6532
6533 info->usc_idle_mode = usc_idle_mode;
6534 //usc_OutReg(info, TCSR, usc_idle_mode);
6535 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6536 info->tcsr_value += usc_idle_mode;
6537 usc_OutReg(info, TCSR, info->tcsr_value);
6538
6539 /*
6540 * if the SyncLink WAN adapter is running in external sync mode, the
6541 * transmitter has been set to Monosync in order to try to mimic
6542 * a true raw outbound bit stream. Monosync still sends an open/close
6543 * sync char at the start/end of a frame. Try to match those sync
6544 * patterns to the idle mode set here
6545 */
6546 if ( info->params.mode == MGSL_MODE_RAW ) {
6547 unsigned char syncpat = 0;
6548 switch( info->idle_mode ) {
6549 case HDLC_TXIDLE_FLAGS:
6550 syncpat = 0x7e;
6551 break;
6552 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6553 syncpat = 0x55;
6554 break;
6555 case HDLC_TXIDLE_ZEROS:
6556 case HDLC_TXIDLE_SPACE:
6557 syncpat = 0x00;
6558 break;
6559 case HDLC_TXIDLE_ONES:
6560 case HDLC_TXIDLE_MARK:
6561 syncpat = 0xff;
6562 break;
6563 case HDLC_TXIDLE_ALT_MARK_SPACE:
6564 syncpat = 0xaa;
6565 break;
6566 }
6567
6568 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6569 }
6570
6571 } /* end of usc_set_txidle() */
6572
6573 /* usc_get_serial_signals()
6574 *
6575 * Query the adapter for the state of the V24 status (input) signals.
6576 *
6577 * Arguments: info pointer to device instance data
6578 * Return Value: None
6579 */
6580 void usc_get_serial_signals( struct mgsl_struct *info )
6581 {
6582 u16 status;
6583
6584 /* clear all serial signals except DTR and RTS */
6585 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6586
6587 /* Read the Misc Interrupt status Register (MISR) to get */
6588 /* the V24 status signals. */
6589
6590 status = usc_InReg( info, MISR );
6591
6592 /* set serial signal bits to reflect MISR */
6593
6594 if ( status & MISCSTATUS_CTS )
6595 info->serial_signals |= SerialSignal_CTS;
6596
6597 if ( status & MISCSTATUS_DCD )
6598 info->serial_signals |= SerialSignal_DCD;
6599
6600 if ( status & MISCSTATUS_RI )
6601 info->serial_signals |= SerialSignal_RI;
6602
6603 if ( status & MISCSTATUS_DSR )
6604 info->serial_signals |= SerialSignal_DSR;
6605
6606 } /* end of usc_get_serial_signals() */
6607
6608 /* usc_set_serial_signals()
6609 *
6610 * Set the state of DTR and RTS based on contents of
6611 * serial_signals member of device extension.
6612 *
6613 * Arguments: info pointer to device instance data
6614 * Return Value: None
6615 */
6616 void usc_set_serial_signals( struct mgsl_struct *info )
6617 {
6618 u16 Control;
6619 unsigned char V24Out = info->serial_signals;
6620
6621 /* get the current value of the Port Control Register (PCR) */
6622
6623 Control = usc_InReg( info, PCR );
6624
6625 if ( V24Out & SerialSignal_RTS )
6626 Control &= ~(BIT6);
6627 else
6628 Control |= BIT6;
6629
6630 if ( V24Out & SerialSignal_DTR )
6631 Control &= ~(BIT4);
6632 else
6633 Control |= BIT4;
6634
6635 usc_OutReg( info, PCR, Control );
6636
6637 } /* end of usc_set_serial_signals() */
6638
6639 /* usc_enable_async_clock()
6640 *
6641 * Enable the async clock at the specified frequency.
6642 *
6643 * Arguments: info pointer to device instance data
6644 * data_rate data rate of clock in bps
6645 * 0 disables the AUX clock.
6646 * Return Value: None
6647 */
6648 void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6649 {
6650 if ( data_rate ) {
6651 /*
6652 * Clock mode Control Register (CMCR)
6653 *
6654 * <15..14> 00 counter 1 Disabled
6655 * <13..12> 00 counter 0 Disabled
6656 * <11..10> 11 BRG1 Input is TxC Pin
6657 * <9..8> 11 BRG0 Input is TxC Pin
6658 * <7..6> 01 DPLL Input is BRG1 Output
6659 * <5..3> 100 TxCLK comes from BRG0
6660 * <2..0> 100 RxCLK comes from BRG0
6661 *
6662 * 0000 1111 0110 0100 = 0x0f64
6663 */
6664
6665 usc_OutReg( info, CMCR, 0x0f64 );
6666
6667
6668 /*
6669 * Write 16-bit Time Constant for BRG0
6670 * Time Constant = (ClkSpeed / data_rate) - 1
6671 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6672 */
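		/* For example (assuming a 9600bps async rate): ISA gives
		 * 921600/9600 - 1 = 95 and PCI gives 691200/9600 - 1 = 71. */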
6673
6674 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6675 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6676 else
6677 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6678
6679
6680 /*
6681 * Hardware Configuration Register (HCR)
6682 * Clear Bit 1, BRG0 mode = Continuous
6683 * Set Bit 0 to enable BRG0.
6684 */
6685
6686 usc_OutReg( info, HCR,
6687 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6688
6689
6690 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6691
6692 usc_OutReg( info, IOCR,
6693 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6694 } else {
6695 /* data rate == 0 so turn off BRG0 */
6696 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6697 }
6698
6699 } /* end of usc_enable_async_clock() */
6700
6701 /*
6702 * Buffer Structures:
6703 *
6704 * Normal memory access uses virtual addresses that can make discontiguous
6705 * physical memory pages appear to be contiguous in the virtual address
6706 * space (the processor's memory mapping handles the conversions).
6707 *
6708 * DMA transfers require physically contiguous memory. This is because
6709 * the DMA system controller and DMA bus masters deal with memory using
6710 * only physical addresses.
6711 *
6712 * This causes a problem under Windows NT when large DMA buffers are
6713 * needed. Fragmentation of the nonpaged pool prevents allocations of
6714 * physically contiguous buffers larger than the PAGE_SIZE.
6715 *
6716 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6717 * allows DMA transfers to physically discontiguous buffers. Information
6718 * about each data transfer buffer is contained in a memory structure
6719 * called a 'buffer entry'. A list of buffer entries is maintained
6720 * to track and control the use of the data transfer buffers.
6721 *
6722 * To support this strategy we will allocate sufficient PAGE_SIZE
6723 * contiguous memory buffers to allow for the total required buffer
6724 * space.
6725 *
6726 * The 16C32 accesses the list of buffer entries using Bus Master
6727 * DMA. Control information is read from the buffer entries by the
6728 * 16C32 to control data transfers. status information is written to
6729 * the buffer entries by the 16C32 to indicate the status of completed
6730 * transfers.
6731 *
6732 * The CPU writes control information to the buffer entries to control
6733 * the 16C32 and reads status information from the buffer entries to
6734 * determine information about received and transmitted frames.
6735 *
6736 * Because the CPU and 16C32 (adapter) both need simultaneous access
6737 * to the buffer entries, the buffer entry memory is allocated with
6738 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6739 * entry list to PAGE_SIZE.
6740 *
6741 * The actual data buffers on the other hand will only be accessed
6742 * by the CPU or the adapter but not by both simultaneously. This allows
6743 * Scatter/Gather packet based DMA procedures for using physically
6744 * discontiguous pages.
6745 */
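/*
 * Illustrative shape of a buffer entry as used by this file (the actual
 * DMABUFFERENTRY definition lives in the driver header and may contain
 * additional link/address fields):
 *
 *	count		byte count; cleared by the 16C32 when it takes
 *			ownership of the buffer
 *	status		completion status written by the 16C32; non-zero
 *			marks the last buffer of a received frame
 *	rcc		residual character count used to compute frame size
 *	virt_addr	CPU (virtual) address of the data buffer
 *	phys_entry	physical address of this entry, loaded into
 *			NRARL/NRARU or NTARL/NTARU to start a DMA channel
 */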
6746
6747 /*
6748 * mgsl_reset_tx_dma_buffers()
6749 *
6750 * Set the count for all transmit buffers to 0 to indicate the
6751 * buffer is available for use and set the current buffer to the
6752 * first buffer. This effectively makes all buffers free and
6753 * discards any data in buffers.
6754 *
6755 * Arguments: info pointer to device instance data
6756 * Return Value: None
6757 */
6758 void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6759 {
6760 unsigned int i;
6761
6762 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6763 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6764 }
6765
6766 info->current_tx_buffer = 0;
6767 info->start_tx_dma_buffer = 0;
6768 info->tx_dma_buffers_used = 0;
6769
6770 info->get_tx_holding_index = 0;
6771 info->put_tx_holding_index = 0;
6772 info->tx_holding_count = 0;
6773
6774 } /* end of mgsl_reset_tx_dma_buffers() */
6775
6776 /*
6777 * num_free_tx_dma_buffers()
6778 *
6779 * returns the number of free tx dma buffers available
6780 *
6781 * Arguments: info pointer to device instance data
6782 * Return Value: number of free tx dma buffers
6783 */
6784 int num_free_tx_dma_buffers(struct mgsl_struct *info)
6785 {
6786 return info->tx_buffer_count - info->tx_dma_buffers_used;
6787 }
6788
6789 /*
6790 * mgsl_reset_rx_dma_buffers()
6791 *
6792 * Set the count for all receive buffers to DMABUFFERSIZE
6793 * and set the current buffer to the first buffer. This effectively
6794 * makes all buffers free and discards any data in buffers.
6795 *
6796 * Arguments: info pointer to device instance data
6797 * Return Value: None
6798 */
6799 void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6800 {
6801 unsigned int i;
6802
6803 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6804 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6805 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6806 // info->rx_buffer_list[i].status = 0;
6807 }
6808
6809 info->current_rx_buffer = 0;
6810
6811 } /* end of mgsl_reset_rx_dma_buffers() */
6812
6813 /*
6814 * mgsl_free_rx_frame_buffers()
6815 *
6816 * Free the receive buffers used by a received SDLC
6817 * frame such that the buffers can be reused.
6818 *
6819 * Arguments:
6820 *
6821 * info pointer to device instance data
6822 * StartIndex index of 1st receive buffer of frame
6823 * EndIndex index of last receive buffer of frame
6824 *
6825 * Return Value: None
6826 */
6827 void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6828 {
6829 int Done = 0;
6830 DMABUFFERENTRY *pBufEntry;
6831 unsigned int Index;
6832
6833 /* Starting with 1st buffer entry of the frame clear the status */
6834 /* field and set the count field to DMA Buffer Size. */
6835
6836 Index = StartIndex;
6837
6838 while( !Done ) {
6839 pBufEntry = &(info->rx_buffer_list[Index]);
6840
6841 if ( Index == EndIndex ) {
6842 /* This is the last buffer of the frame! */
6843 Done = 1;
6844 }
6845
6846 /* reset current buffer for reuse */
6847 // pBufEntry->status = 0;
6848 // pBufEntry->count = DMABUFFERSIZE;
6849 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6850
6851 /* advance to next buffer entry in linked list */
6852 Index++;
6853 if ( Index == info->rx_buffer_count )
6854 Index = 0;
6855 }
6856
6857 /* set current buffer to next buffer after last buffer of frame */
6858 info->current_rx_buffer = Index;
6859
6860 } /* end of mgsl_free_rx_frame_buffers() */
6861
6862 /* mgsl_get_rx_frame()
6863 *
6864 * This function attempts to return a received SDLC frame from the
6865 * receive DMA buffers. Only frames received without errors are returned.
6866 *
6867 * Arguments: info pointer to device extension
6868 * Return Value: 1 if frame returned, otherwise 0
6869 */
6870 int mgsl_get_rx_frame(struct mgsl_struct *info)
6871 {
6872 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6873 unsigned short status;
6874 DMABUFFERENTRY *pBufEntry;
6875 unsigned int framesize = 0;
6876 int ReturnCode = 0;
6877 unsigned long flags;
6878 struct tty_struct *tty = info->tty;
6879 int return_frame = 0;
6880
6881 /*
6882 * current_rx_buffer points to the 1st buffer of the next available
6883 * receive frame. To find the last buffer of the frame look for
6884 * a non-zero status field in the buffer entries. (The status
6885 * field is set by the 16C32 after completing a receive frame.)
6886 */
6887
6888 StartIndex = EndIndex = info->current_rx_buffer;
6889
6890 while( !info->rx_buffer_list[EndIndex].status ) {
6891 /*
6892 * If the count field of the buffer entry is non-zero then
6893 * this buffer has not been used. (The 16C32 clears the count
6894 * field when it starts using the buffer.) If an unused buffer
6895 * is encountered then there are no frames available.
6896 */
6897
6898 if ( info->rx_buffer_list[EndIndex].count )
6899 goto Cleanup;
6900
6901 /* advance to next buffer entry in linked list */
6902 EndIndex++;
6903 if ( EndIndex == info->rx_buffer_count )
6904 EndIndex = 0;
6905
6906 /* if entire list searched then no frame available */
6907 if ( EndIndex == StartIndex ) {
6908 /* If this occurs then something bad happened,
6909 * all buffers have been 'used' but none mark
6910 * the end of a frame. Reset buffers and receiver.
6911 */
6912
6913 if ( info->rx_enabled ){
6914 spin_lock_irqsave(&info->irq_spinlock,flags);
6915 usc_start_receiver(info);
6916 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6917 }
6918 goto Cleanup;
6919 }
6920 }
6921
6922
6923 /* check status of receive frame */
6924
6925 status = info->rx_buffer_list[EndIndex].status;
6926
6927 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6928 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6929 if ( status & RXSTATUS_SHORT_FRAME )
6930 info->icount.rxshort++;
6931 else if ( status & RXSTATUS_ABORT )
6932 info->icount.rxabort++;
6933 else if ( status & RXSTATUS_OVERRUN )
6934 info->icount.rxover++;
6935 else {
6936 info->icount.rxcrc++;
6937 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6938 return_frame = 1;
6939 }
6940 framesize = 0;
6941 #ifdef CONFIG_SYNCLINK_SYNCPPP
6942 info->netstats.rx_errors++;
6943 info->netstats.rx_frame_errors++;
6944 #endif
6945 } else
6946 return_frame = 1;
6947
6948 if ( return_frame ) {
6949 /* receive frame has no errors, get frame size.
6950 * The frame size is the starting value of the RCC (which was
6951 * set to 0xffff) minus the ending value of the RCC (decremented
6952 * once for each receive character) minus 2 for the 16-bit CRC.
6953 */
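		/* Example: RCLR was loaded with 0xffff, so if the stored RCC
		 * is 0xff7d the raw size is 0xffff - 0xff7d = 130 bytes;
		 * subtracting 2 for a 16-bit CRC leaves 128 bytes of data. */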
6954
6955 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6956
6957 /* adjust frame size for CRC if any */
6958 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6959 framesize -= 2;
6960 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6961 framesize -= 4;
6962 }
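/* Worked example of the calculation above (illustrative values, not
* from the original driver): if the 16C32 counted the RCC down from
* RCLRVALUE (0xffff) to 0xffd5, then 0xffff - 0xffd5 = 42 characters
* were received; with HDLC_CRC_16_CCITT the reported framesize is
* 42 - 2 = 40 data bytes, and with HDLC_CRC_32_CCITT it is 42 - 4 = 38.
*/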
6963
6964 if ( debug_level >= DEBUG_LEVEL_BH )
6965 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6966 __FILE__,__LINE__,info->device_name,status,framesize);
6967
6968 if ( debug_level >= DEBUG_LEVEL_DATA )
6969 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6970 MIN(framesize,DMABUFFERSIZE),0);
6971
6972 if (framesize) {
6973 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6974 ((framesize+1) > info->max_frame_size) ) ||
6975 (framesize > info->max_frame_size) )
6976 info->icount.rxlong++;
6977 else {
6978 /* copy dma buffer(s) to contiguous intermediate buffer */
6979 int copy_count = framesize;
6980 int index = StartIndex;
6981 unsigned char *ptmp = info->intermediate_rxbuffer;
6982
6983 if ( !(status & RXSTATUS_CRC_ERROR))
6984 info->icount.rxok++;
6985
6986 while(copy_count) {
6987 int partial_count;
6988 if ( copy_count > DMABUFFERSIZE )
6989 partial_count = DMABUFFERSIZE;
6990 else
6991 partial_count = copy_count;
6992
6993 pBufEntry = &(info->rx_buffer_list[index]);
6994 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6995 ptmp += partial_count;
6996 copy_count -= partial_count;
6997
6998 if ( ++index == info->rx_buffer_count )
6999 index = 0;
7000 }
7001
7002 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
7003 ++framesize;
7004 *ptmp = (status & RXSTATUS_CRC_ERROR ?
7005 RX_CRC_ERROR :
7006 RX_OK);
7007
7008 if ( debug_level >= DEBUG_LEVEL_DATA )
7009 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
7010 __FILE__,__LINE__,info->device_name,
7011 *ptmp);
7012 }
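/* Note: with HDLC_CRC_RETURN_EX the buffer handed to the line
* discipline below is one byte longer than the received data; for
* example (illustrative size) a 40 byte frame is delivered as 41
* bytes, the trailing byte being RX_OK or RX_CRC_ERROR.
*/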
7013
7014 #ifdef CONFIG_SYNCLINK_SYNCPPP
7015 if (info->netcount) {
7016 /* pass frame to syncppp device */
7017 mgsl_sppp_rx_done(info,info->intermediate_rxbuffer,framesize);
7018 }
7019 else
7020 #endif
7021 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
7022 }
7023 }
7024 /* Free the buffers used by this frame. */
7025 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
7026
7027 ReturnCode = 1;
7028
7029 Cleanup:
7030
7031 if ( info->rx_enabled && info->rx_overflow ) {
7032 /* The receiver needs to be restarted because of
7033 * a receive overflow (buffer or FIFO). If the
7034 * receive buffers are now empty, then restart receiver.
7035 */
7036
7037 if ( !info->rx_buffer_list[EndIndex].status &&
7038 info->rx_buffer_list[EndIndex].count ) {
7039 spin_lock_irqsave(&info->irq_spinlock,flags);
7040 usc_start_receiver(info);
7041 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7042 }
7043 }
7044
7045 return ReturnCode;
7046
7047 } /* end of mgsl_get_rx_frame() */
7048
7049 /* mgsl_get_raw_rx_frame()
7050 *
7051 * This function attempts to return a received frame from the
7052 * receive DMA buffers when running in external loop mode. In this mode,
7053 * we will return at most one DMABUFFERSIZE frame to the application.
7054 * The USC receiver is triggering off of DCD going active to start a new
7055 * frame, and DCD going inactive to terminate the frame (similar to
7056 * processing a closing flag character).
7057 *
7058 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
7059 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
7060 * status field and the RCC field will indicate the length of the
7061 * entire received frame. We take this RCC field and get the modulus
7062 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
7063 * last Rx DMA buffer and return that last portion of the frame.
7064 *
7065 * Arguments: info pointer to device extension
7066 * Return Value: 1 if frame returned, otherwise 0
7067 */
7068 int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
7069 {
7070 unsigned int CurrentIndex, NextIndex;
7071 unsigned short status;
7072 DMABUFFERENTRY *pBufEntry;
7073 unsigned int framesize = 0;
7074 int ReturnCode = 0;
7075 unsigned long flags;
7076 struct tty_struct *tty = info->tty;
7077
7078 /*
7079 * current_rx_buffer points to the 1st buffer of the next available
7080 * receive frame. The status field is set by the 16C32 after
7081 * completing a receive frame. If the status field of this buffer
7082 * is zero, either the USC is still filling this buffer or this
7083 * is one of a series of buffers making up a received frame.
7084 *
7085 * If the count field of this buffer is zero, the USC is either
7086 * using this buffer or has used this buffer. Look at the count
7087 * field of the next buffer. If that next buffer's count is
7088 * non-zero, the USC is still actively using the current buffer.
7089 * Otherwise, if the next buffer's count field is zero, the
7090 * current buffer is complete and the USC is using the next
7091 * buffer.
7092 */
7093 CurrentIndex = NextIndex = info->current_rx_buffer;
7094 ++NextIndex;
7095 if ( NextIndex == info->rx_buffer_count )
7096 NextIndex = 0;
7097
7098 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
7099 (info->rx_buffer_list[CurrentIndex].count == 0 &&
7100 info->rx_buffer_list[NextIndex].count == 0)) {
7101 /*
7102 * Either the status field of this dma buffer is non-zero
7103 * (indicating the last buffer of a receive frame) or the next
7104 * buffer is marked as in use -- implying this buffer is complete
7105 * and is an intermediate buffer of this received frame.
7106 */
7107
7108 status = info->rx_buffer_list[CurrentIndex].status;
7109
7110 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
7111 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
7112 if ( status & RXSTATUS_SHORT_FRAME )
7113 info->icount.rxshort++;
7114 else if ( status & RXSTATUS_ABORT )
7115 info->icount.rxabort++;
7116 else if ( status & RXSTATUS_OVERRUN )
7117 info->icount.rxover++;
7118 else
7119 info->icount.rxcrc++;
7120 framesize = 0;
7121 } else {
7122 /*
7123 * A receive frame is available, get frame size and status.
7124 *
7125 * The frame size is the starting value of the RCC (which was
7126 * set to 0xffff) minus the ending value of the RCC (decremented
7127 * once for each receive character) minus 2 or 4 for the 16-bit
7128 * or 32-bit CRC.
7129 *
7130 * If the status field is zero, this is an intermediate buffer.
7131 * Its size is 4K.
7132 *
7133 * If the DMA Buffer Entry's Status field is non-zero, the
7134 * receive operation completed normally (ie: DCD dropped). The
7135 * RCC field is valid and holds the received frame size.
7136 * It is possible that the RCC field will be zero on a DMA buffer
7137 * entry with a non-zero status. This can occur if the total
7138 * frame size (number of bytes between the time DCD goes active
7139 * to the time DCD goes inactive) exceeds 65535 bytes. In this
7140 * case the 16C32 has underrun on the RCC count and appears to
7141 * stop updating this counter to let us know the actual received
7142 * frame size. If this happens (non-zero status and zero RCC),
7143 * simply return the entire RxDMA Buffer.
7144 */
7145 if ( status ) {
7146 /*
7147 * In the event that the final RxDMA Buffer is
7148 * terminated with a non-zero status and the RCC
7149 * field is zero, we interpret this as the RCC
7150 * having underflowed (received frame > 65535 bytes).
7151 *
7152 * Signal the event to the user by passing back
7153 * a status of RxStatus_CrcError, returning the full
7154 * buffer, and letting the app figure out what data is
7155 * actually valid.
7156 */
7157 if ( info->rx_buffer_list[CurrentIndex].rcc )
7158 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
7159 else
7160 framesize = DMABUFFERSIZE;
7161 }
7162 else
7163 framesize = DMABUFFERSIZE;
7164 }
7165
7166 if ( framesize > DMABUFFERSIZE ) {
7167 /*
7168 * if running in raw sync mode, ISR handler for
7169 * End Of Buffer events terminates all buffers at 4K.
7170 * If this frame size is said to be >4K, get the
7171 * actual number of bytes of the frame in this buffer.
7172 */
7173 framesize = framesize % DMABUFFERSIZE;
7174 }
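/* Worked example of the modulus above (illustrative values): if the
* RCC reports a total frame length of 10000 bytes and DMABUFFERSIZE
* is 4K (4096), then 10000 % 4096 = 1808 bytes remain in this final
* buffer, and that is the portion returned to the application here.
*/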
7175
7176
7177 if ( debug_level >= DEBUG_LEVEL_BH )
7178 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
7179 __FILE__,__LINE__,info->device_name,status,framesize);
7180
7181 if ( debug_level >= DEBUG_LEVEL_DATA )
7182 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
7183 MIN(framesize,DMABUFFERSIZE),0);
7184
7185 if (framesize) {
7186 /* copy dma buffer(s) to contiguous intermediate buffer */
7187 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
7188
7189 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
7190 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
7191 info->icount.rxok++;
7192
7193 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
7194 }
7195
7196 /* Free the buffers used by this frame. */
7197 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
7198
7199 ReturnCode = 1;
7200 }
7201
7202
7203 if ( info->rx_enabled && info->rx_overflow ) {
7204 /* The receiver needs to be restarted because of
7205 * a receive overflow (buffer or FIFO). If the
7206 * receive buffers are now empty, then restart receiver.
7207 */
7208
7209 if ( !info->rx_buffer_list[CurrentIndex].status &&
7210 info->rx_buffer_list[CurrentIndex].count ) {
7211 spin_lock_irqsave(&info->irq_spinlock,flags);
7212 usc_start_receiver(info);
7213 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7214 }
7215 }
7216
7217 return ReturnCode;
7218
7219 } /* end of mgsl_get_raw_rx_frame() */
7220
7221 /* mgsl_load_tx_dma_buffer()
7222 *
7223 * Load the transmit DMA buffer with the specified data.
7224 *
7225 * Arguments:
7226 *
7227 * info pointer to device extension
7228 * Buffer pointer to buffer containing frame to load
7229 * BufferSize size in bytes of frame in Buffer
7230 *
7231 * Return Value: None
7232 */
7233 void mgsl_load_tx_dma_buffer(struct mgsl_struct *info, const char *Buffer,
7234 unsigned int BufferSize)
7235 {
7236 unsigned short Copycount;
7237 unsigned int i = 0;
7238 DMABUFFERENTRY *pBufEntry;
7239
7240 if ( debug_level >= DEBUG_LEVEL_DATA )
7241 mgsl_trace_block(info,Buffer, MIN(BufferSize,DMABUFFERSIZE), 1);
7242
7243 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7244 /* set CMR:13 to start transmit when
7245 * next GoAhead (abort) is received
7246 */
7247 info->cmr_value |= BIT13;
7248 }
7249
7250 /* begin loading the frame in the next available tx dma
7251 * buffer, remembering its starting location for setting
7252 * up tx dma operation
7253 */
7254 i = info->current_tx_buffer;
7255 info->start_tx_dma_buffer = i;
7256
7257 /* Setup the status and RCC (Frame Size) fields of the 1st */
7258 /* buffer entry in the transmit DMA buffer list. */
7259
7260 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
7261 info->tx_buffer_list[i].rcc = BufferSize;
7262 info->tx_buffer_list[i].count = BufferSize;
7263
7264 /* Copy frame data from 1st source buffer to the DMA buffers. */
7265 /* The frame data may span multiple DMA buffers. */
7266
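/* Illustrative example of the copy loop below (sizes assumed for
* clarity): a 6000 byte frame with a 4K DMABUFFERSIZE spans two
* buffer entries, 4096 bytes copied into the first and 1904 into
* the second, one entry per pass of the loop.
*/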
7267 while( BufferSize ){
7268 /* Get a pointer to next DMA buffer entry. */
7269 pBufEntry = &info->tx_buffer_list[i++];
7270
7271 if ( i == info->tx_buffer_count )
7272 i=0;
7273
7274 /* Calculate the number of bytes that can be copied from */
7275 /* the source buffer to this DMA buffer. */
7276 if ( BufferSize > DMABUFFERSIZE )
7277 Copycount = DMABUFFERSIZE;
7278 else
7279 Copycount = BufferSize;
7280
7281 /* Actually copy data from source buffer to DMA buffer. */
7282 /* Also set the data count for this individual DMA buffer. */
7283 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
7284 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
7285 else
7286 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
7287
7288 pBufEntry->count = Copycount;
7289
7290 /* Advance source pointer and reduce remaining data count. */
7291 Buffer += Copycount;
7292 BufferSize -= Copycount;
7293
7294 ++info->tx_dma_buffers_used;
7295 }
7296
7297 /* remember next available tx dma buffer */
7298 info->current_tx_buffer = i;
7299
7300 } /* end of mgsl_load_tx_dma_buffer() */
7301
7302 /*
7303 * mgsl_register_test()
7304 *
7305 * Performs a register test of the 16C32.
7306 *
7307 * Arguments: info pointer to device instance data
7308 * Return Value: TRUE if test passed, otherwise FALSE
7309 */
7310 BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7311 {
7312 static unsigned short BitPatterns[] =
7313 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7314 static unsigned int Patterncount = sizeof(BitPatterns)/sizeof(unsigned short);
7315 unsigned int i;
7316 BOOLEAN rc = TRUE;
7317 unsigned long flags;
7318
7319 spin_lock_irqsave(&info->irq_spinlock,flags);
7320 usc_reset(info);
7321
7322 /* Verify the reset state of some registers. */
7323
7324 if ( (usc_InReg( info, SICR ) != 0) ||
7325 (usc_InReg( info, IVR ) != 0) ||
7326 (usc_InDmaReg( info, DIVR ) != 0) ){
7327 rc = FALSE;
7328 }
7329
7330 if ( rc == TRUE ){
7331 /* Write bit patterns to various registers but do it out of */
7332 /* sync, then read back and verify values. */
7333
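/* Example of the rotation below (first pass, i == 0): TC0R gets
* 0x0000, TC1R 0xffff, TCLR 0xaaaa, RCLR 0x5555, RSR 0x1234 and
* TBCR 0x6969, so no two registers hold the same value when the
* read-back comparison is made.
*/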
7334 for ( i = 0 ; i < Patterncount ; i++ ) {
7335 usc_OutReg( info, TC0R, BitPatterns[i] );
7336 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7337 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7338 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7339 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7340 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7341
7342 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7343 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7344 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7345 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7346 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7347 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7348 rc = FALSE;
7349 break;
7350 }
7351 }
7352 }
7353
7354 usc_reset(info);
7355 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7356
7357 return rc;
7358
7359 } /* end of mgsl_register_test() */
7360
7361 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7362 *
7363 * Arguments: info pointer to device instance data
7364 * Return Value: TRUE if test passed, otherwise FALSE
7365 */
7366 BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7367 {
7368 unsigned long EndTime;
7369 unsigned long flags;
7370
7371 spin_lock_irqsave(&info->irq_spinlock,flags);
7372 usc_reset(info);
7373
7374 /*
7375 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7376 * The ISR sets irq_occurred to 1.
7377 */
7378
7379 info->irq_occurred = FALSE;
7380
7381 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
7382 /* Enable INTEN (Port 6, Bit12) */
7383 /* This connects the IRQ request signal to the ISA bus */
7384 /* on the ISA adapter. This has no effect for the PCI adapter */
7385 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7386
7387 usc_EnableMasterIrqBit(info);
7388 usc_EnableInterrupts(info, IO_PIN);
7389 usc_ClearIrqPendingBits(info, IO_PIN);
7390
7391 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7392 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7393
7394 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7395
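/* Poll for the interrupt: up to 100 iterations of a roughly 10ms
* sleep (assuming jiffies_from_ms() converts milliseconds to
* jiffies), i.e. about one second in total before giving up.
*/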
7396 EndTime=100;
7397 while( EndTime-- && !info->irq_occurred ) {
7398 set_current_state(TASK_INTERRUPTIBLE);
7399 schedule_timeout(jiffies_from_ms(10));
7400 }
7401
7402 spin_lock_irqsave(&info->irq_spinlock,flags);
7403 usc_reset(info);
7404 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7405
7406 if ( !info->irq_occurred )
7407 return FALSE;
7408 else
7409 return TRUE;
7410
7411 } /* end of mgsl_irq_test() */
7412
7413 /* mgsl_dma_test()
7414 *
7415 * Perform a DMA test of the 16C32. A small frame is
7416 * transmitted via DMA from a transmit buffer to a receive buffer
7417 * using single buffer DMA mode.
7418 *
7419 * Arguments: info pointer to device instance data
7420 * Return Value: TRUE if test passed, otherwise FALSE
7421 */
7422 BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7423 {
7424 unsigned short FifoLevel;
7425 unsigned long phys_addr;
7426 unsigned int FrameSize;
7427 unsigned int i;
7428 char *TmpPtr;
7429 BOOLEAN rc = TRUE;
7430 unsigned short status=0;
7431 unsigned long EndTime;
7432 unsigned long flags;
7433 MGSL_PARAMS tmp_params;
7434
7435 /* save current port options */
7436 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7437 /* load default port options */
7438 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7439
7440 #define TESTFRAMESIZE 40
7441
7442 spin_lock_irqsave(&info->irq_spinlock,flags);
7443
7444 /* setup 16C32 for SDLC DMA transfer mode */
7445
7446 usc_reset(info);
7447 usc_set_sdlc_mode(info);
7448 usc_enable_loopback(info,1);
7449
7450 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7451 * field of the buffer entry after fetching buffer address. This
7452 * way we can detect a DMA failure for a DMA read (which should be
7453 * non-destructive to system memory) before we try to write to
7454 * memory (where a failure could corrupt system memory).
7455 */
7456
7457 /* Receive DMA mode Register (RDMR)
7458 *
7459 * <15..14> 11 DMA mode = Linked List Buffer mode
7460 * <13> 1 RSBinA/L = store Rx status Block in List entry
7461 * <12> 0 1 = Clear count of List Entry after fetching
7462 * <11..10> 00 Address mode = Increment
7463 * <9> 1 Terminate Buffer on RxBound
7464 * <8> 0 Bus Width = 16bits
7465 * <7..0> ? status Bits (write as 0s)
7466 *
7467 * 1110 0010 0000 0000 = 0xe200
7468 */
7469
7470 usc_OutDmaReg( info, RDMR, 0xe200 );
7471
7472 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7473
7474
7475 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7476
7477 FrameSize = TESTFRAMESIZE;
7478
7479 /* setup 1st transmit buffer entry: */
7480 /* with frame size and transmit control word */
7481
7482 info->tx_buffer_list[0].count = FrameSize;
7483 info->tx_buffer_list[0].rcc = FrameSize;
7484 info->tx_buffer_list[0].status = 0x4000;
7485
7486 /* build a transmit frame in 1st transmit DMA buffer */
7487
7488 TmpPtr = info->tx_buffer_list[0].virt_addr;
7489 for (i = 0; i < FrameSize; i++ )
7490 *TmpPtr++ = i;
7491
7492 /* setup 1st receive buffer entry: */
7493 /* clear status, set max receive buffer size */
7494
7495 info->rx_buffer_list[0].status = 0;
7496 info->rx_buffer_list[0].count = FrameSize + 4;
7497
7498 /* zero out the 1st receive buffer */
7499
7500 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7501
7502 /* Set count field of next buffer entries to prevent */
7503 /* 16C32 from using buffers after the 1st one. */
7504
7505 info->tx_buffer_list[1].count = 0;
7506 info->rx_buffer_list[1].count = 0;
7507
7508
7509 /***************************/
7510 /* Program 16C32 receiver. */
7511 /***************************/
7512
7513 spin_lock_irqsave(&info->irq_spinlock,flags);
7514
7515 /* setup DMA transfers */
7516 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7517
7518 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7519 phys_addr = info->rx_buffer_list[0].phys_entry;
7520 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7521 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7522
7523 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7524 usc_InDmaReg( info, RDMR );
7525 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7526
7527 /* Enable Receiver (RMR <1..0> = 10) */
7528 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7529
7530 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7531
7532
7533 /*************************************************************/
7534 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7535 /*************************************************************/
7536
7537 /* Wait 100ms for interrupt. */
7538 EndTime = jiffies + jiffies_from_ms(100);
7539
7540 for(;;) {
7541 if (time_after(jiffies, EndTime)) {
7542 rc = FALSE;
7543 break;
7544 }
7545
7546 spin_lock_irqsave(&info->irq_spinlock,flags);
7547 status = usc_InDmaReg( info, RDMR );
7548 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7549
7550 if ( !(status & BIT4) && (status & BIT5) ) {
7551 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7552 /* BUSY (BIT 5) is active (channel still active). */
7553 /* This means the buffer entry read has completed. */
7554 break;
7555 }
7556 }
7557
7558
7559 /******************************/
7560 /* Program 16C32 transmitter. */
7561 /******************************/
7562
7563 spin_lock_irqsave(&info->irq_spinlock,flags);
7564
7565 /* Program the Transmit Character Length Register (TCLR) */
7566 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7567
7568 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7569 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7570
7571 /* Program the address of the 1st DMA Buffer Entry in linked list */
7572
7573 phys_addr = info->tx_buffer_list[0].phys_entry;
7574 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7575 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7576
7577 /* unlatch Tx status bits, and start transmit channel. */
7578
7579 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7580 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7581
7582 /* wait for DMA controller to fill transmit FIFO */
7583
7584 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7585
7586 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7587
7588
7589 /**********************************/
7590 /* WAIT FOR TRANSMIT FIFO TO FILL */
7591 /**********************************/
7592
7593 /* Wait 100ms */
7594 EndTime = jiffies + jiffies_from_ms(100);
7595
7596 for(;;) {
7597 if (time_after(jiffies, EndTime)) {
7598 rc = FALSE;
7599 break;
7600 }
7601
7602 spin_lock_irqsave(&info->irq_spinlock,flags);
7603 FifoLevel = usc_InReg(info, TICR) >> 8;
7604 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7605
7606 if ( FifoLevel < 16 )
7607 break;
7608 else
7609 if ( FrameSize < 32 ) {
7610 /* This frame is smaller than the entire transmit FIFO */
7611 /* so wait for the entire frame to be loaded. */
7612 if ( FifoLevel <= (32 - FrameSize) )
7613 break;
7614 }
7615 }
7616
7617
7618 if ( rc == TRUE )
7619 {
7620 /* Enable 16C32 transmitter. */
7621
7622 spin_lock_irqsave(&info->irq_spinlock,flags);
7623
7624 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7625 usc_TCmd( info, TCmd_SendFrame );
7626 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7627
7628 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7629
7630
7631 /******************************/
7632 /* WAIT FOR TRANSMIT COMPLETE */
7633 /******************************/
7634
7635 /* Wait 100ms */
7636 EndTime = jiffies + jiffies_from_ms(100);
7637
7638 /* While timer not expired wait for transmit complete */
7639
7640 spin_lock_irqsave(&info->irq_spinlock,flags);
7641 status = usc_InReg( info, TCSR );
7642 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7643
7644 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7645 if (time_after(jiffies, EndTime)) {
7646 rc = FALSE;
7647 break;
7648 }
7649
7650 spin_lock_irqsave(&info->irq_spinlock,flags);
7651 status = usc_InReg( info, TCSR );
7652 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7653 }
7654 }
7655
7656
7657 if ( rc == TRUE ){
7658 /* CHECK FOR TRANSMIT ERRORS */
7659 if ( status & (BIT5 + BIT1) )
7660 rc = FALSE;
7661 }
7662
7663 if ( rc == TRUE ) {
7664 /* WAIT FOR RECEIVE COMPLETE */
7665
7666 /* Wait 100ms */
7667 EndTime = jiffies + jiffies_from_ms(100);
7668
7669 /* Wait for 16C32 to write receive status to buffer entry. */
7670 status=info->rx_buffer_list[0].status;
7671 while ( status == 0 ) {
7672 if (time_after(jiffies, EndTime)) {
7673 rc = FALSE;
7674 break;
7675 }
7676 status=info->rx_buffer_list[0].status;
7677 }
7678 }
7679
7680
7681 if ( rc == TRUE ) {
7682 /* CHECK FOR RECEIVE ERRORS */
7683 status = info->rx_buffer_list[0].status;
7684
7685 if ( status & (BIT8 + BIT3 + BIT1) ) {
7686 /* receive error has occurred */
7687 rc = FALSE;
7688 } else {
7689 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7690 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7691 rc = FALSE;
7692 }
7693 }
7694 }
7695
7696 spin_lock_irqsave(&info->irq_spinlock,flags);
7697 usc_reset( info );
7698 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7699
7700 /* restore current port options */
7701 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7702
7703 return rc;
7704
7705 } /* end of mgsl_dma_test() */
7706
7707 /* mgsl_adapter_test()
7708 *
7709 * Perform the register, IRQ, and DMA tests for the 16C32.
7710 *
7711 * Arguments: info pointer to device instance data
7712 * Return Value: 0 if success, otherwise -ENODEV
7713 */
7714 int mgsl_adapter_test( struct mgsl_struct *info )
7715 {
7716 if ( debug_level >= DEBUG_LEVEL_INFO )
7717 printk( "%s(%d):Testing device %s\n",
7718 __FILE__,__LINE__,info->device_name );
7719
7720 if ( !mgsl_register_test( info ) ) {
7721 info->init_error = DiagStatus_AddressFailure;
7722 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7723 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7724 return -ENODEV;
7725 }
7726
7727 if ( !mgsl_irq_test( info ) ) {
7728 info->init_error = DiagStatus_IrqFailure;
7729 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7730 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7731 return -ENODEV;
7732 }
7733
7734 if ( !mgsl_dma_test( info ) ) {
7735 info->init_error = DiagStatus_DmaFailure;
7736 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7737 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7738 return -ENODEV;
7739 }
7740
7741 if ( debug_level >= DEBUG_LEVEL_INFO )
7742 printk( "%s(%d):device %s passed diagnostics\n",
7743 __FILE__,__LINE__,info->device_name );
7744
7745 return 0;
7746
7747 } /* end of mgsl_adapter_test() */
7748
7749 /* mgsl_memory_test()
7750 *
7751 * Test the shared memory on a PCI adapter.
7752 *
7753 * Arguments: info pointer to device instance data
7754 * Return Value: TRUE if test passed, otherwise FALSE
7755 */
7756 BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7757 {
7758 static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa,
7759 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7760 unsigned long Patterncount = sizeof(BitPatterns)/sizeof(unsigned long);
7761 unsigned long i;
7762 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7763 unsigned long * TestAddr;
7764
7765 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7766 return TRUE;
7767
7768 TestAddr = (unsigned long *)info->memory_base;
7769
7770 /* Test data lines with test pattern at one location. */
7771
7772 for ( i = 0 ; i < Patterncount ; i++ ) {
7773 *TestAddr = BitPatterns[i];
7774 if ( *TestAddr != BitPatterns[i] )
7775 return FALSE;
7776 }
7777
7778 /* Test address lines with incrementing pattern over */
7779 /* entire address range. */
7780
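/* Illustration of the address line test below: each 32-bit word is
* written with its own byte offset (word 0 gets 0, word 1 gets 4,
* word 2 gets 8, ...), so any address aliasing shows up as a
* mismatch in the read-back loop that follows.
*/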
7781 for ( i = 0 ; i < TestLimit ; i++ ) {
7782 *TestAddr = i * 4;
7783 TestAddr++;
7784 }
7785
7786 TestAddr = (unsigned long *)info->memory_base;
7787
7788 for ( i = 0 ; i < TestLimit ; i++ ) {
7789 if ( *TestAddr != i * 4 )
7790 return FALSE;
7791 TestAddr++;
7792 }
7793
7794 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7795
7796 return TRUE;
7797
7798 } /* End Of mgsl_memory_test() */
7799
7800
7801 /* mgsl_load_pci_memory()
7802 *
7803 * Load a large block of data into the PCI shared memory.
7804 * Use this instead of memcpy() or memmove() to move data
7805 * into the PCI shared memory.
7806 *
7807 * Notes:
7808 *
7809 * This function prevents the PCI9050 interface chip from hogging
7810 * the adapter local bus, which can starve the 16C32 by preventing
7811 * 16C32 bus master cycles.
7812 *
7813 * The PCI9050 documentation says that the 9050 will always release
7814 * control of the local bus after completing the current read
7815 * or write operation.
7816 *
7817 * It appears that as long as the PCI9050 write FIFO is full, the
7818 * PCI9050 treats all of the writes as a single burst transaction
7819 * and will not release the bus. This causes DMA latency problems
7820 * at high speeds when copying large data blocks to the shared
7821 * memory.
7822 *
7823 * This function, in effect, breaks a large shared memory write
7824 * into multiple transactions by interleaving a shared memory read
7825 * which will flush the write FIFO and 'complete' the write
7826 * transaction. This allows any pending DMA request to gain control
7827 * of the local bus in a timely fashion.
7828 *
7829 * Arguments:
7830 *
7831 * TargetPtr pointer to target address in PCI shared memory
7832 * SourcePtr pointer to source buffer for data
7833 * count count in bytes of data to copy
7834 *
7835 * Return Value: None
7836 */
7837 void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7838 unsigned short count )
7839 {
7840 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7841 #define PCI_LOAD_INTERVAL 64
7842
7843 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7844 unsigned short Index;
7845 unsigned long Dummy;
7846
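/* Worked example (illustrative count): for count == 150 and
* PCI_LOAD_INTERVAL == 64, Intervalcount is 150 / 64 = 2, so the
* loop below copies two 64 byte chunks, reading back one long after
* each to flush the PCI9050 write FIFO, and the final memcpy()
* moves the remaining 150 % 64 = 22 bytes.
*/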
7847 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7848 {
7849 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7850 Dummy = *((volatile unsigned long *)TargetPtr);
7851 TargetPtr += PCI_LOAD_INTERVAL;
7852 SourcePtr += PCI_LOAD_INTERVAL;
7853 }
7854
7855 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7856
7857 } /* End Of mgsl_load_pci_memory() */
7858
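/* mgsl_trace_block()
*
* Dump a block of transmit or receive data to the kernel log for
* debugging. Each output line shows up to 16 bytes as two digit hex
* values followed by the same bytes as ASCII, with '.' substituted
* for non-printable characters.
*
* Arguments: info pointer to device instance data
* data pointer to data to dump
* count number of bytes to dump
* xmit non-zero for transmit data, zero for receive data
* Return Value: None
*/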
7859 void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7860 {
7861 int i;
7862 int linecount;
7863 if (xmit)
7864 printk("%s tx data:\n",info->device_name);
7865 else
7866 printk("%s rx data:\n",info->device_name);
7867
7868 while(count) {
7869 if (count > 16)
7870 linecount = 16;
7871 else
7872 linecount = count;
7873
7874 for(i=0;i<linecount;i++)
7875 printk("%02X ",(unsigned char)data[i]);
7876 for(;i<17;i++)
7877 printk(" ");
7878 for(i=0;i<linecount;i++) {
7879 if (data[i]>=040 && data[i]<=0176)
7880 printk("%c",data[i]);
7881 else
7882 printk(".");
7883 }
7884 printk("\n");
7885
7886 data += linecount;
7887 count -= linecount;
7888 }
7889 } /* end of mgsl_trace_block() */
7890
7891 /* mgsl_tx_timeout()
7892 *
7893 * called when HDLC frame times out
7894 * update stats and do tx completion processing
7895 *
7896 * Arguments: context pointer to device instance data
7897 * Return Value: None
7898 */
7899 void mgsl_tx_timeout(unsigned long context)
7900 {
7901 struct mgsl_struct *info = (struct mgsl_struct*)context;
7902 unsigned long flags;
7903
7904 if ( debug_level >= DEBUG_LEVEL_INFO )
7905 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7906 __FILE__,__LINE__,info->device_name);
7907 if(info->tx_active &&
7908 (info->params.mode == MGSL_MODE_HDLC ||
7909 info->params.mode == MGSL_MODE_RAW) ) {
7910 info->icount.txtimeout++;
7911 }
7912 spin_lock_irqsave(&info->irq_spinlock,flags);
7913 info->tx_active = 0;
7914 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7915
7916 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7917 usc_loopmode_cancel_transmit( info );
7918
7919 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7920
7921 #ifdef CONFIG_SYNCLINK_SYNCPPP
7922 if (info->netcount)
7923 mgsl_sppp_tx_done(info);
7924 else
7925 #endif
7926 mgsl_bh_transmit(info);
7927
7928 } /* end of mgsl_tx_timeout() */
7929
7930 /* signal that there are no more frames to send, so that
7931 * line is 'released' by echoing RxD to TxD when current
7932 * transmission is complete (or immediately if no tx in progress).
7933 */
7934 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7935 {
7936 unsigned long flags;
7937
7938 spin_lock_irqsave(&info->irq_spinlock,flags);
7939 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7940 if (info->tx_active)
7941 info->loopmode_send_done_requested = TRUE;
7942 else
7943 usc_loopmode_send_done(info);
7944 }
7945 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7946
7947 return 0;
7948 }
7949
7950 /* release the line by echoing RxD to TxD
7951 * upon completion of a transmit frame
7952 */
7953 void usc_loopmode_send_done( struct mgsl_struct * info )
7954 {
7955 info->loopmode_send_done_requested = FALSE;
7956 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7957 info->cmr_value &= ~BIT13;
7958 usc_OutReg(info, CMR, info->cmr_value);
7959 }
7960
7961 /* abort a transmit in progress while in HDLC LoopMode
7962 */
7963 void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7964 {
7965 /* reset tx dma channel and purge TxFifo */
7966 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7967 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7968 usc_loopmode_send_done( info );
7969 }
7970
7971 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7972 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7973 * we must clear CMR:13 to begin repeating TxData to RxData
7974 */
7975 void usc_loopmode_insert_request( struct mgsl_struct * info )
7976 {
7977 info->loopmode_insert_requested = TRUE;
7978
7979 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7980 * begin repeating TxData on RxData (complete insertion)
7981 */
7982 usc_OutReg( info, RICR,
7983 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7984
7985 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7986 info->cmr_value |= BIT13;
7987 usc_OutReg(info, CMR, info->cmr_value);
7988 }
7989
7990 /* return 1 if station is inserted into the loop, otherwise 0
7991 */
7992 int usc_loopmode_active( struct mgsl_struct * info)
7993 {
7994 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7995 }
7996
7997 /* return 1 if USC is in loop send mode, otherwise 0
7998 */
7999 int usc_loopmode_send_active( struct mgsl_struct * info )
8000 {
8001 return usc_InReg( info, CCSR ) & BIT6 ? 1 : 0 ;
8002 }
8003
8004 #ifdef CONFIG_SYNCLINK_SYNCPPP
8005 /* syncppp net device routines
8006 */
8007
8008 void mgsl_sppp_init(struct mgsl_struct *info)
8009 {
8010 struct net_device *d;
8011
8012 sprintf(info->netname,"mgsl%d",info->line);
8013
8014 info->if_ptr = &info->pppdev;
8015 info->netdev = info->pppdev.dev = &info->netdevice;
8016
8017 sppp_attach(&info->pppdev);
8018
8019 d = info->netdev;
8020 strcpy(d->name,info->netname);
8021 d->base_addr = info->io_base;
8022 d->irq = info->irq_level;
8023 d->dma = info->dma_level;
8024 d->priv = info;
8025 d->init = NULL;
8026 d->open = mgsl_sppp_open;
8027 d->stop = mgsl_sppp_close;
8028 d->hard_start_xmit = mgsl_sppp_tx;
8029 d->do_ioctl = mgsl_sppp_ioctl;
8030 d->get_stats = mgsl_net_stats;
8031 d->tx_timeout = mgsl_sppp_tx_timeout;
8032 d->watchdog_timeo = 10*HZ;
8033
8034 #if LINUX_VERSION_CODE < VERSION(2,4,4)
8035 dev_init_buffers(d);
8036 #endif
8037
8038 if (register_netdev(d) < 0) {
8039 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
8040 sppp_detach(info->netdev);
8041 return;
8042 }
8043
8044 if (debug_level >= DEBUG_LEVEL_INFO)
8045 printk("mgsl_sppp_init()\n");
8046 }
8047
8048 void mgsl_sppp_delete(struct mgsl_struct *info)
8049 {
8050 if (debug_level >= DEBUG_LEVEL_INFO)
8051 printk("mgsl_sppp_delete(%s)\n",info->netname);
8052 sppp_detach(info->netdev);
8053 unregister_netdev(info->netdev);
8054 }
8055
8056 int mgsl_sppp_open(struct net_device *d)
8057 {
8058 struct mgsl_struct *info = d->priv;
8059 int err;
8060 unsigned long flags;
8061
8062 if (debug_level >= DEBUG_LEVEL_INFO)
8063 printk("mgsl_sppp_open(%s)\n",info->netname);
8064
8065 spin_lock_irqsave(&info->netlock, flags);
8066 if (info->count != 0 || info->netcount != 0) {
8067 printk(KERN_WARNING "%s: sppp_open returning busy\n", info->netname);
8068 spin_unlock_irqrestore(&info->netlock, flags);
8069 return -EBUSY;
8070 }
8071 info->netcount=1;
8072 MOD_INC_USE_COUNT;
8073 spin_unlock_irqrestore(&info->netlock, flags);
8074
8075 /* claim resources and init adapter */
8076 if ((err = startup(info)) != 0)
8077 goto open_fail;
8078
8079 /* allow syncppp module to do open processing */
8080 if ((err = sppp_open(d)) != 0) {
8081 shutdown(info);
8082 goto open_fail;
8083 }
8084
8085 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
8086 mgsl_program_hw(info);
8087
8088 d->trans_start = jiffies;
8089 netif_start_queue(d);
8090 return 0;
8091
8092 open_fail:
8093 spin_lock_irqsave(&info->netlock, flags);
8094 info->netcount=0;
8095 MOD_DEC_USE_COUNT;
8096 spin_unlock_irqrestore(&info->netlock, flags);
8097 return err;
8098 }
8099
8100 void mgsl_sppp_tx_timeout(struct net_device *dev)
8101 {
8102 struct mgsl_struct *info = dev->priv;
8103 unsigned long flags;
8104
8105 if (debug_level >= DEBUG_LEVEL_INFO)
8106 printk("mgsl_sppp_tx_timeout(%s)\n",info->netname);
8107
8108 info->netstats.tx_errors++;
8109 info->netstats.tx_aborted_errors++;
8110
8111 spin_lock_irqsave(&info->irq_spinlock,flags);
8112 usc_stop_transmitter(info);
8113 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8114
8115 netif_wake_queue(dev);
8116 }
8117
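/* mgsl_sppp_tx()
*
* syncppp transmit entry point (hard_start_xmit). Flow as implemented
* below: stop the netif queue, copy the socket buffer into the
* transmit DMA buffers, update the transmit statistics, free the skb,
* and start the transmitter if it is not already active;
* mgsl_sppp_tx_done() wakes the queue when transmission completes.
*
* Arguments: skb socket buffer containing frame to send
* dev pointer to network device structure
* Return Value: 0
*/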
8118 int mgsl_sppp_tx(struct sk_buff *skb, struct net_device *dev)
8119 {
8120 struct mgsl_struct *info = dev->priv;
8121 unsigned long flags;
8122
8123 if (debug_level >= DEBUG_LEVEL_INFO)
8124 printk("mgsl_sppp_tx(%s)\n",info->netname);
8125
8126 netif_stop_queue(dev);
8127
8128 info->xmit_cnt = skb->len;
8129 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
8130 info->netstats.tx_packets++;
8131 info->netstats.tx_bytes += skb->len;
8132 dev_kfree_skb(skb);
8133
8134 dev->trans_start = jiffies;
8135
8136 spin_lock_irqsave(&info->irq_spinlock,flags);
8137 if (!info->tx_active)
8138 usc_start_transmitter(info);
8139 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8140
8141 return 0;
8142 }
8143
8144 int mgsl_sppp_close(struct net_device *d)
8145 {
8146 struct mgsl_struct *info = d->priv;
8147 unsigned long flags;
8148
8149 if (debug_level >= DEBUG_LEVEL_INFO)
8150 printk("mgsl_sppp_close(%s)\n",info->netname);
8151
8152 /* shutdown adapter and release resources */
8153 shutdown(info);
8154
8155 /* allow syncppp to do close processing */
8156 sppp_close(d);
8157 netif_stop_queue(d);
8158
8159 spin_lock_irqsave(&info->netlock, flags);
8160 info->netcount=0;
8161 MOD_DEC_USE_COUNT;
8162 spin_unlock_irqrestore(&info->netlock, flags);
8163 return 0;
8164 }
8165
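/* mgsl_sppp_rx_done()
*
* Pass a received frame up to the network layer. Allocates an skb of
* the frame size, copies the frame data into it, stamps the protocol
* as ETH_P_WAN_PPP and hands it to netif_rx(); if no skb can be
* allocated the frame is counted as dropped.
*
* Arguments: info pointer to device instance data
* buf pointer to received frame data
* size size of frame in bytes
* Return Value: None
*/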
8166 void mgsl_sppp_rx_done(struct mgsl_struct *info, char *buf, int size)
8167 {
8168 struct sk_buff *skb = dev_alloc_skb(size);
8169 if (debug_level >= DEBUG_LEVEL_INFO)
8170 printk("mgsl_sppp_rx_done(%s)\n",info->netname);
8171 if (skb == NULL) {
8172 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
8173 info->netname);
8174 info->netstats.rx_dropped++;
8175 return;
8176 }
8177
8178 memcpy(skb_put(skb, size),buf,size);
8179
8180 skb->protocol = htons(ETH_P_WAN_PPP);
8181 skb->dev = info->netdev;
8182 skb->mac.raw = skb->data;
8183 info->netstats.rx_packets++;
8184 info->netstats.rx_bytes += size;
8185 netif_rx(skb);
8186 info->netdev->trans_start = jiffies;
8187 }
8188
8189 void mgsl_sppp_tx_done(struct mgsl_struct *info)
8190 {
8191 if (netif_queue_stopped(info->netdev))
8192 netif_wake_queue(info->netdev);
8193 }
8194
8195 struct net_device_stats *mgsl_net_stats(struct net_device *dev)
8196 {
8197 struct mgsl_struct *info = dev->priv;
8198 if (debug_level >= DEBUG_LEVEL_INFO)
8199 printk("mgsl_net_stats(%s)\n",info->netname);
8200 return &info->netstats;
8201 }
8202
8203 int mgsl_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8204 {
8205 struct mgsl_struct *info = (struct mgsl_struct *)dev->priv;
8206 if (debug_level >= DEBUG_LEVEL_INFO)
8207 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
8208 info->netname, cmd );
8209 return sppp_do_ioctl(dev, ifr, cmd);
8210 }
8211
8212 #endif /* ifdef CONFIG_SYNCLINK_SYNCPPP */
8213
8214 static int __init synclink_init_one (struct pci_dev *dev,
8215 const struct pci_device_id *ent)
8216 {
8217 struct mgsl_struct *info;
8218
8219 if (pci_enable_device(dev)) {
8220 printk("error enabling pci device %p\n", dev);
8221 return -EIO;
8222 }
8223
8224 if (!(info = mgsl_allocate_device())) {
8225 printk("can't allocate device instance data.\n");
8226 return -EIO;
8227 }
8228
8229 /* Copy user configuration info to device instance data */
8230
8231 info->io_base = pci_resource_start(dev, 2);
8232 info->irq_level = dev->irq;
8233 info->phys_memory_base = pci_resource_start(dev, 3);
8234
8235 /* Because ioremap() only works on page boundaries we must map
8236 * a larger area than is actually implemented for the LCR
8237 * memory range. We map a full page starting at the page boundary.
8238 */
8239 info->phys_lcr_base = pci_resource_start(dev, 0);
8240 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8241 info->phys_lcr_base &= ~(PAGE_SIZE-1);
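/* Example of the alignment above (illustrative BAR value, assuming a
* 4K PAGE_SIZE): a reported LCR base of 0xE0001080 yields
* lcr_offset = 0x080 and phys_lcr_base = 0xE0001000, so a full page
* can be mapped and the LCR registers accessed at the saved offset
* within that page.
*/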
8242
8243 info->bus_type = MGSL_BUS_TYPE_PCI;
8244 info->io_addr_size = 8;
8245 info->irq_flags = SA_SHIRQ;
8246
8247 if (dev->device == 0x0210) {
8248 /* Version 1 PCI9030 based universal PCI adapter */
8249 info->misc_ctrl_value = 0x007c4080;
8250 info->hw_version = 1;
8251 } else {
8252 /* Version 0 PCI9050 based 5V PCI adapter
8253 * A PCI9050 bug prevents reading LCR registers if
8254 * LCR base address bit 7 is set. Maintain shadow
8255 * value so we can write to LCR misc control reg.
8256 */
8257 info->misc_ctrl_value = 0x087e4546;
8258 info->hw_version = 0;
8259 }
8260
8261 mgsl_add_device(info);
8262
8263 return 0;
8264 }
8265
8266 static void __devexit synclink_remove_one (struct pci_dev *dev)
8267 {
8268 }
8269
8270