1 /*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (too small to hold largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
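/*
 * Illustrative user-space sketch (not part of this driver) of the
 * synchronous write model described above: select HDLC mode with
 * MGSL_IOCSPARAMS, then send exactly one frame per write().  The
 * device node name and the minimal error handling are assumptions
 * made only for this example.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	static int send_one_frame(const void *frame, int len)
 *	{
 *		MGSL_PARAMS params;
 *		int rc = -1;
 *		int fd = open("/dev/ttySL0", O_RDWR);	// assumed node name
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *			params.mode = MGSL_MODE_HDLC;
 *			if (ioctl(fd, MGSL_IOCSPARAMS, &params) == 0 &&
 *			    write(fd, frame, len) == len)
 *				rc = 0;		// one complete frame queued
 *		}
 *		close(fd);
 *		return rc;
 *	}
 */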
55
56 #if defined(__i386__)
57 # define BREAKPOINT() asm(" int $3");
58 #else
59 # define BREAKPOINT() { }
60 #endif
61
62 #define MAX_ISA_DEVICES 10
63 #define MAX_PCI_DEVICES 10
64 #define MAX_TOTAL_DEVICES 20
65
66 #include <linux/module.h>
67 #include <linux/errno.h>
68 #include <linux/signal.h>
69 #include <linux/sched.h>
70 #include <linux/timer.h>
71 #include <linux/interrupt.h>
72 #include <linux/pci.h>
73 #include <linux/tty.h>
74 #include <linux/tty_flip.h>
75 #include <linux/serial.h>
76 #include <linux/major.h>
77 #include <linux/string.h>
78 #include <linux/fcntl.h>
79 #include <linux/ptrace.h>
80 #include <linux/ioport.h>
81 #include <linux/mm.h>
82 #include <linux/seq_file.h>
83 #include <linux/slab.h>
84 #include <linux/delay.h>
85 #include <linux/netdevice.h>
86 #include <linux/vmalloc.h>
87 #include <linux/init.h>
88 #include <linux/ioctl.h>
89 #include <linux/synclink.h>
90
91 #include <asm/system.h>
92 #include <asm/io.h>
93 #include <asm/irq.h>
94 #include <asm/dma.h>
95 #include <linux/bitops.h>
96 #include <asm/types.h>
97 #include <linux/termios.h>
98 #include <linux/workqueue.h>
99 #include <linux/hdlc.h>
100 #include <linux/dma-mapping.h>
101
102 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
103 #define SYNCLINK_GENERIC_HDLC 1
104 #else
105 #define SYNCLINK_GENERIC_HDLC 0
106 #endif
107
108 #define GET_USER(error,value,addr) error = get_user(value,addr)
109 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
110 #define PUT_USER(error,value,addr) error = put_user(value,addr)
111 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
112
113 #include <asm/uaccess.h>
114
115 #define RCLRVALUE 0xffff
116
117 static MGSL_PARAMS default_params = {
118 MGSL_MODE_HDLC, /* unsigned long mode */
119 0, /* unsigned char loopback; */
120 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
121 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
122 0, /* unsigned long clock_speed; */
123 0xff, /* unsigned char addr_filter; */
124 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
125 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
126 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
127 9600, /* unsigned long data_rate; */
128 8, /* unsigned char data_bits; */
129 1, /* unsigned char stop_bits; */
130 ASYNC_PARITY_NONE /* unsigned char parity; */
131 };
132
133 #define SHARED_MEM_ADDRESS_SIZE 0x40000
134 #define BUFFERLISTSIZE 4096
135 #define DMABUFFERSIZE 4096
136 #define MAXRXFRAMES 7
137
138 typedef struct _DMABUFFERENTRY
139 {
140 u32 phys_addr; /* 32-bit flat physical address of data buffer */
141 volatile u16 count; /* buffer size/data count */
142 volatile u16 status; /* Control/status field */
143 volatile u16 rcc; /* character count field */
144 u16 reserved; /* padding required by 16C32 */
145 u32 link; /* 32-bit flat link to next buffer entry */
146 char *virt_addr; /* virtual address of data buffer */
147 u32 phys_entry; /* physical address of this buffer entry */
148 dma_addr_t dma_addr;
149 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
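/*
 * The adapter's DMA controller consumes these entries as a linked list
 * in adapter/shared memory: 'link' holds the 32-bit physical address of
 * the next entry and the last entry points back to the first, forming a
 * ring.  A minimal sketch of that chaining, simplified from the buffer
 * list setup performed later in this driver (it assumes phys_entry has
 * already been filled in for every entry):
 *
 *	static void chain_buffer_entries(DMABUFFERENTRY *list,
 *					 unsigned int count)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < count; i++) {
 *			// link to the following entry, wrapping to
 *			// entry 0 so the controller can cycle the ring
 *			list[i].link = list[(i + 1) % count].phys_entry;
 *			list[i].count = DMABUFFERSIZE;
 *		}
 *	}
 */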
150
151 /* The queue of BH actions to be performed */
152
153 #define BH_RECEIVE 1
154 #define BH_TRANSMIT 2
155 #define BH_STATUS 4
156
157 #define IO_PIN_SHUTDOWN_LIMIT 100
158
159 struct _input_signal_events {
160 int ri_up;
161 int ri_down;
162 int dsr_up;
163 int dsr_down;
164 int dcd_up;
165 int dcd_down;
166 int cts_up;
167 int cts_down;
168 };
169
170 /* transmit holding buffer definitions */
171 #define MAX_TX_HOLDING_BUFFERS 5
172 struct tx_holding_buffer {
173 int buffer_size;
174 unsigned char * buffer;
175 };
176
177
178 /*
179 * Device instance data structure
180 */
181
182 struct mgsl_struct {
183 int magic;
184 struct tty_port port;
185 int line;
186 int hw_version;
187
188 struct mgsl_icount icount;
189
190 int timeout;
191 int x_char; /* xon/xoff character */
192 u16 read_status_mask;
193 u16 ignore_status_mask;
194 unsigned char *xmit_buf;
195 int xmit_head;
196 int xmit_tail;
197 int xmit_cnt;
198
199 wait_queue_head_t status_event_wait_q;
200 wait_queue_head_t event_wait_q;
201 struct timer_list tx_timer; /* HDLC transmit timeout timer */
202 struct mgsl_struct *next_device; /* device list link */
203
204 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
205 struct work_struct task; /* task structure for scheduling bh */
206
207 u32 EventMask; /* event trigger mask */
208 u32 RecordedEvents; /* pending events */
209
210 u32 max_frame_size; /* as set by device config */
211
212 u32 pending_bh;
213
214 bool bh_running; /* Protection from multiple */
215 int isr_overflow;
216 bool bh_requested;
217
218 int dcd_chkcount; /* check counts to prevent */
219 int cts_chkcount; /* too many IRQs if a signal */
220 int dsr_chkcount; /* is floating */
221 int ri_chkcount;
222
223 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
224 u32 buffer_list_phys;
225 dma_addr_t buffer_list_dma_addr;
226
227 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
228 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
229 unsigned int current_rx_buffer;
230
231 int num_tx_dma_buffers; /* number of tx dma frames required */
232 int tx_dma_buffers_used;
233 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
234 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
235 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
236 int current_tx_buffer; /* next tx dma buffer to be loaded */
237
238 unsigned char *intermediate_rxbuffer;
239
240 int num_tx_holding_buffers; /* number of tx holding buffers allocated */
241 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
242 int put_tx_holding_index; /* next tx holding buffer to store user request */
243 int tx_holding_count; /* number of tx holding buffers waiting */
244 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
245
246 bool rx_enabled;
247 bool rx_overflow;
248 bool rx_rcc_underrun;
249
250 bool tx_enabled;
251 bool tx_active;
252 u32 idle_mode;
253
254 u16 cmr_value;
255 u16 tcsr_value;
256
257 char device_name[25]; /* device instance name */
258
259 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
260 unsigned char bus; /* expansion bus number (zero based) */
261 unsigned char function; /* PCI device number */
262
263 unsigned int io_base; /* base I/O address of adapter */
264 unsigned int io_addr_size; /* size of the I/O address range */
265 bool io_addr_requested; /* true if I/O address requested */
266
267 unsigned int irq_level; /* interrupt level */
268 unsigned long irq_flags;
269 bool irq_requested; /* true if IRQ requested */
270
271 unsigned int dma_level; /* DMA channel */
272 bool dma_requested; /* true if dma channel requested */
273
274 u16 mbre_bit;
275 u16 loopback_bits;
276 u16 usc_idle_mode;
277
278 MGSL_PARAMS params; /* communications parameters */
279
280 unsigned char serial_signals; /* current serial signal states */
281
282 bool irq_occurred; /* for diagnostics use */
283 unsigned int init_error; /* Initialization startup error (DIAGS) */
284 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
285
286 u32 last_mem_alloc;
287 unsigned char* memory_base; /* shared memory address (PCI only) */
288 u32 phys_memory_base;
289 bool shared_mem_requested;
290
291 unsigned char* lcr_base; /* local config registers (PCI only) */
292 u32 phys_lcr_base;
293 u32 lcr_offset;
294 bool lcr_mem_requested;
295
296 u32 misc_ctrl_value;
297 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
298 char char_buf[MAX_ASYNC_BUFFER_SIZE];
299 bool drop_rts_on_tx_done;
300
301 bool loopmode_insert_requested;
302 bool loopmode_send_done_requested;
303
304 struct _input_signal_events input_signal_events;
305
306 /* generic HDLC device parts */
307 int netcount;
308 spinlock_t netlock;
309
310 #if SYNCLINK_GENERIC_HDLC
311 struct net_device *netdev;
312 #endif
313 };
314
315 #define MGSL_MAGIC 0x5401
316
317 /*
318 * The size of the serial xmit buffer is 1 page, or 4096 bytes
319 */
320 #ifndef SERIAL_XMIT_SIZE
321 #define SERIAL_XMIT_SIZE 4096
322 #endif
323
324 /*
325 * These macros define the offsets used in calculating the
326 * I/O address of the specified USC registers.
327 */
328
329
330 #define DCPIN 2 /* Bit 1 of I/O address */
331 #define SDPIN 4 /* Bit 2 of I/O address */
332
333 #define DCAR 0 /* DMA command/address register */
334 #define CCAR SDPIN /* channel command/address register */
335 #define DATAREG (DCPIN + SDPIN) /* serial data register */
336 #define MSBONLY 0x41
337 #define LSBONLY 0x40
338
339 /*
340 * These macros define the register address (ordinal number)
341 * used for writing address/value pairs to the USC.
342 */
343
344 #define CMR 0x02 /* Channel mode Register */
345 #define CCSR 0x04 /* Channel Command/status Register */
346 #define CCR 0x06 /* Channel Control Register */
347 #define PSR 0x08 /* Port status Register */
348 #define PCR 0x0a /* Port Control Register */
349 #define TMDR 0x0c /* Test mode Data Register */
350 #define TMCR 0x0e /* Test mode Control Register */
351 #define CMCR 0x10 /* Clock mode Control Register */
352 #define HCR 0x12 /* Hardware Configuration Register */
353 #define IVR 0x14 /* Interrupt Vector Register */
354 #define IOCR 0x16 /* Input/Output Control Register */
355 #define ICR 0x18 /* Interrupt Control Register */
356 #define DCCR 0x1a /* Daisy Chain Control Register */
357 #define MISR 0x1c /* Misc Interrupt status Register */
358 #define SICR 0x1e /* status Interrupt Control Register */
359 #define RDR 0x20 /* Receive Data Register */
360 #define RMR 0x22 /* Receive mode Register */
361 #define RCSR 0x24 /* Receive Command/status Register */
362 #define RICR 0x26 /* Receive Interrupt Control Register */
363 #define RSR 0x28 /* Receive Sync Register */
364 #define RCLR 0x2a /* Receive count Limit Register */
365 #define RCCR 0x2c /* Receive Character count Register */
366 #define TC0R 0x2e /* Time Constant 0 Register */
367 #define TDR 0x30 /* Transmit Data Register */
368 #define TMR 0x32 /* Transmit mode Register */
369 #define TCSR 0x34 /* Transmit Command/status Register */
370 #define TICR 0x36 /* Transmit Interrupt Control Register */
371 #define TSR 0x38 /* Transmit Sync Register */
372 #define TCLR 0x3a /* Transmit count Limit Register */
373 #define TCCR 0x3c /* Transmit Character count Register */
374 #define TC1R 0x3e /* Time Constant 1 Register */
375
376
377 /*
378 * MACRO DEFINITIONS FOR DMA REGISTERS
379 */
380
381 #define DCR 0x06 /* DMA Control Register (shared) */
382 #define DACR 0x08 /* DMA Array count Register (shared) */
383 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
384 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
385 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
386 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
387 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
388
389 #define TDMR 0x02 /* Transmit DMA mode Register */
390 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
391 #define TBCR 0x2a /* Transmit Byte count Register */
392 #define TARL 0x2c /* Transmit Address Register (low) */
393 #define TARU 0x2e /* Transmit Address Register (high) */
394 #define NTBCR 0x3a /* Next Transmit Byte count Register */
395 #define NTARL 0x3c /* Next Transmit Address Register (low) */
396 #define NTARU 0x3e /* Next Transmit Address Register (high) */
397
398 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
399 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
400 #define RBCR 0xaa /* Receive Byte count Register */
401 #define RARL 0xac /* Receive Address Register (low) */
402 #define RARU 0xae /* Receive Address Register (high) */
403 #define NRBCR 0xba /* Next Receive Byte count Register */
404 #define NRARL 0xbc /* Next Receive Address Register (low) */
405 #define NRARU 0xbe /* Next Receive Address Register (high) */
406
407
408 /*
409 * MACRO DEFINITIONS FOR MODEM STATUS BITS
410 */
411
412 #define MODEMSTATUS_DTR 0x80
413 #define MODEMSTATUS_DSR 0x40
414 #define MODEMSTATUS_RTS 0x20
415 #define MODEMSTATUS_CTS 0x10
416 #define MODEMSTATUS_RI 0x04
417 #define MODEMSTATUS_DCD 0x01
418
419
420 /*
421 * Channel Command/Address Register (CCAR) Command Codes
422 */
423
424 #define RTCmd_Null 0x0000
425 #define RTCmd_ResetHighestIus 0x1000
426 #define RTCmd_TriggerChannelLoadDma 0x2000
427 #define RTCmd_TriggerRxDma 0x2800
428 #define RTCmd_TriggerTxDma 0x3000
429 #define RTCmd_TriggerRxAndTxDma 0x3800
430 #define RTCmd_PurgeRxFifo 0x4800
431 #define RTCmd_PurgeTxFifo 0x5000
432 #define RTCmd_PurgeRxAndTxFifo 0x5800
433 #define RTCmd_LoadRcc 0x6800
434 #define RTCmd_LoadTcc 0x7000
435 #define RTCmd_LoadRccAndTcc 0x7800
436 #define RTCmd_LoadTC0 0x8800
437 #define RTCmd_LoadTC1 0x9000
438 #define RTCmd_LoadTC0AndTC1 0x9800
439 #define RTCmd_SerialDataLSBFirst 0xa000
440 #define RTCmd_SerialDataMSBFirst 0xa800
441 #define RTCmd_SelectBigEndian 0xb000
442 #define RTCmd_SelectLittleEndian 0xb800
443
444
445 /*
446 * DMA Command/Address Register (DCAR) Command Codes
447 */
448
449 #define DmaCmd_Null 0x0000
450 #define DmaCmd_ResetTxChannel 0x1000
451 #define DmaCmd_ResetRxChannel 0x1200
452 #define DmaCmd_StartTxChannel 0x2000
453 #define DmaCmd_StartRxChannel 0x2200
454 #define DmaCmd_ContinueTxChannel 0x3000
455 #define DmaCmd_ContinueRxChannel 0x3200
456 #define DmaCmd_PauseTxChannel 0x4000
457 #define DmaCmd_PauseRxChannel 0x4200
458 #define DmaCmd_AbortTxChannel 0x5000
459 #define DmaCmd_AbortRxChannel 0x5200
460 #define DmaCmd_InitTxChannel 0x7000
461 #define DmaCmd_InitRxChannel 0x7200
462 #define DmaCmd_ResetHighestDmaIus 0x8000
463 #define DmaCmd_ResetAllChannels 0x9000
464 #define DmaCmd_StartAllChannels 0xa000
465 #define DmaCmd_ContinueAllChannels 0xb000
466 #define DmaCmd_PauseAllChannels 0xc000
467 #define DmaCmd_AbortAllChannels 0xd000
468 #define DmaCmd_InitAllChannels 0xf000
469
470 #define TCmd_Null 0x0000
471 #define TCmd_ClearTxCRC 0x2000
472 #define TCmd_SelectTicrTtsaData 0x4000
473 #define TCmd_SelectTicrTxFifostatus 0x5000
474 #define TCmd_SelectTicrIntLevel 0x6000
475 #define TCmd_SelectTicrdma_level 0x7000
476 #define TCmd_SendFrame 0x8000
477 #define TCmd_SendAbort 0x9000
478 #define TCmd_EnableDleInsertion 0xc000
479 #define TCmd_DisableDleInsertion 0xd000
480 #define TCmd_ClearEofEom 0xe000
481 #define TCmd_SetEofEom 0xf000
482
483 #define RCmd_Null 0x0000
484 #define RCmd_ClearRxCRC 0x2000
485 #define RCmd_EnterHuntmode 0x3000
486 #define RCmd_SelectRicrRtsaData 0x4000
487 #define RCmd_SelectRicrRxFifostatus 0x5000
488 #define RCmd_SelectRicrIntLevel 0x6000
489 #define RCmd_SelectRicrdma_level 0x7000
490
491 /*
492 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
493 */
494
495 #define RECEIVE_STATUS BIT5
496 #define RECEIVE_DATA BIT4
497 #define TRANSMIT_STATUS BIT3
498 #define TRANSMIT_DATA BIT2
499 #define IO_PIN BIT1
500 #define MISC BIT0
501
502
503 /*
504 * Receive status Bits in Receive Command/status Register RCSR
505 */
506
507 #define RXSTATUS_SHORT_FRAME BIT8
508 #define RXSTATUS_CODE_VIOLATION BIT8
509 #define RXSTATUS_EXITED_HUNT BIT7
510 #define RXSTATUS_IDLE_RECEIVED BIT6
511 #define RXSTATUS_BREAK_RECEIVED BIT5
512 #define RXSTATUS_ABORT_RECEIVED BIT5
513 #define RXSTATUS_RXBOUND BIT4
514 #define RXSTATUS_CRC_ERROR BIT3
515 #define RXSTATUS_FRAMING_ERROR BIT3
516 #define RXSTATUS_ABORT BIT2
517 #define RXSTATUS_PARITY_ERROR BIT2
518 #define RXSTATUS_OVERRUN BIT1
519 #define RXSTATUS_DATA_AVAILABLE BIT0
520 #define RXSTATUS_ALL 0x01f6
521 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
522
523 /*
524 * Values for setting transmit idle mode in
525 * Transmit Control/status Register (TCSR)
526 */
527 #define IDLEMODE_FLAGS 0x0000
528 #define IDLEMODE_ALT_ONE_ZERO 0x0100
529 #define IDLEMODE_ZERO 0x0200
530 #define IDLEMODE_ONE 0x0300
531 #define IDLEMODE_ALT_MARK_SPACE 0x0500
532 #define IDLEMODE_SPACE 0x0600
533 #define IDLEMODE_MARK 0x0700
534 #define IDLEMODE_MASK 0x0700
535
536 /*
537 * IUSC revision identifiers
538 */
539 #define IUSC_SL1660 0x4d44
540 #define IUSC_PRE_SL1660 0x4553
541
542 /*
543 * Transmit status Bits in Transmit Command/status Register (TCSR)
544 */
545
546 #define TCSR_PRESERVE 0x0F00
547
548 #define TCSR_UNDERWAIT BIT11
549 #define TXSTATUS_PREAMBLE_SENT BIT7
550 #define TXSTATUS_IDLE_SENT BIT6
551 #define TXSTATUS_ABORT_SENT BIT5
552 #define TXSTATUS_EOF_SENT BIT4
553 #define TXSTATUS_EOM_SENT BIT4
554 #define TXSTATUS_CRC_SENT BIT3
555 #define TXSTATUS_ALL_SENT BIT2
556 #define TXSTATUS_UNDERRUN BIT1
557 #define TXSTATUS_FIFO_EMPTY BIT0
558 #define TXSTATUS_ALL 0x00fa
559 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
560
561
562 #define MISCSTATUS_RXC_LATCHED BIT15
563 #define MISCSTATUS_RXC BIT14
564 #define MISCSTATUS_TXC_LATCHED BIT13
565 #define MISCSTATUS_TXC BIT12
566 #define MISCSTATUS_RI_LATCHED BIT11
567 #define MISCSTATUS_RI BIT10
568 #define MISCSTATUS_DSR_LATCHED BIT9
569 #define MISCSTATUS_DSR BIT8
570 #define MISCSTATUS_DCD_LATCHED BIT7
571 #define MISCSTATUS_DCD BIT6
572 #define MISCSTATUS_CTS_LATCHED BIT5
573 #define MISCSTATUS_CTS BIT4
574 #define MISCSTATUS_RCC_UNDERRUN BIT3
575 #define MISCSTATUS_DPLL_NO_SYNC BIT2
576 #define MISCSTATUS_BRG1_ZERO BIT1
577 #define MISCSTATUS_BRG0_ZERO BIT0
578
579 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
580 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
581
582 #define SICR_RXC_ACTIVE BIT15
583 #define SICR_RXC_INACTIVE BIT14
584 #define SICR_RXC (BIT15+BIT14)
585 #define SICR_TXC_ACTIVE BIT13
586 #define SICR_TXC_INACTIVE BIT12
587 #define SICR_TXC (BIT13+BIT12)
588 #define SICR_RI_ACTIVE BIT11
589 #define SICR_RI_INACTIVE BIT10
590 #define SICR_RI (BIT11+BIT10)
591 #define SICR_DSR_ACTIVE BIT9
592 #define SICR_DSR_INACTIVE BIT8
593 #define SICR_DSR (BIT9+BIT8)
594 #define SICR_DCD_ACTIVE BIT7
595 #define SICR_DCD_INACTIVE BIT6
596 #define SICR_DCD (BIT7+BIT6)
597 #define SICR_CTS_ACTIVE BIT5
598 #define SICR_CTS_INACTIVE BIT4
599 #define SICR_CTS (BIT5+BIT4)
600 #define SICR_RCC_UNDERFLOW BIT3
601 #define SICR_DPLL_NO_SYNC BIT2
602 #define SICR_BRG1_ZERO BIT1
603 #define SICR_BRG0_ZERO BIT0
604
605 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
606 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
607 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
608 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
609 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
610
611 #define usc_EnableInterrupts( a, b ) \
612 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
613
614 #define usc_DisableInterrupts( a, b ) \
615 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
616
617 #define usc_EnableMasterIrqBit(a) \
618 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
619
620 #define usc_DisableMasterIrqBit(a) \
621 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
622
623 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
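/*
 * Typical usage of the IRQ helper macros above, mirroring calls made
 * later in this file: the low byte is a mask built from the ICR bit
 * names defined earlier (RECEIVE_DATA, TRANSMIT_STATUS, ...):
 *
 *	usc_EnableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
 *	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
 *	usc_DisableInterrupts( info, TRANSMIT_DATA );
 */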
624
625 /*
626 * Transmit status Bits in Transmit Control status Register (TCSR)
627 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
628 */
629
630 #define TXSTATUS_PREAMBLE_SENT BIT7
631 #define TXSTATUS_IDLE_SENT BIT6
632 #define TXSTATUS_ABORT_SENT BIT5
633 #define TXSTATUS_EOF BIT4
634 #define TXSTATUS_CRC_SENT BIT3
635 #define TXSTATUS_ALL_SENT BIT2
636 #define TXSTATUS_UNDERRUN BIT1
637 #define TXSTATUS_FIFO_EMPTY BIT0
638
639 #define DICR_MASTER BIT15
640 #define DICR_TRANSMIT BIT0
641 #define DICR_RECEIVE BIT1
642
643 #define usc_EnableDmaInterrupts(a,b) \
644 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
645
646 #define usc_DisableDmaInterrupts(a,b) \
647 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
648
649 #define usc_EnableStatusIrqs(a,b) \
650 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
651
652 #define usc_DisablestatusIrqs(a,b) \
653 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
654
655 /* Transmit status Bits in Transmit Control status Register (TCSR) */
656 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
657
658
659 #define DISABLE_UNCONDITIONAL 0
660 #define DISABLE_END_OF_FRAME 1
661 #define ENABLE_UNCONDITIONAL 2
662 #define ENABLE_AUTO_CTS 3
663 #define ENABLE_AUTO_DCD 3
664 #define usc_EnableTransmitter(a,b) \
665 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
666 #define usc_EnableReceiver(a,b) \
667 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
668
669 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
670 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
671 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
672
673 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
674 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
675 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
676 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
677 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
678
679 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
680 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
681
682 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
683
684 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
685 static void usc_start_receiver( struct mgsl_struct *info );
686 static void usc_stop_receiver( struct mgsl_struct *info );
687
688 static void usc_start_transmitter( struct mgsl_struct *info );
689 static void usc_stop_transmitter( struct mgsl_struct *info );
690 static void usc_set_txidle( struct mgsl_struct *info );
691 static void usc_load_txfifo( struct mgsl_struct *info );
692
693 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
694 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
695
696 static void usc_get_serial_signals( struct mgsl_struct *info );
697 static void usc_set_serial_signals( struct mgsl_struct *info );
698
699 static void usc_reset( struct mgsl_struct *info );
700
701 static void usc_set_sync_mode( struct mgsl_struct *info );
702 static void usc_set_sdlc_mode( struct mgsl_struct *info );
703 static void usc_set_async_mode( struct mgsl_struct *info );
704 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
705
706 static void usc_loopback_frame( struct mgsl_struct *info );
707
708 static void mgsl_tx_timeout(unsigned long context);
709
710
711 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
712 static void usc_loopmode_insert_request( struct mgsl_struct * info );
713 static int usc_loopmode_active( struct mgsl_struct * info);
714 static void usc_loopmode_send_done( struct mgsl_struct * info );
715
716 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
717
718 #if SYNCLINK_GENERIC_HDLC
719 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
720 static void hdlcdev_tx_done(struct mgsl_struct *info);
721 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
722 static int hdlcdev_init(struct mgsl_struct *info);
723 static void hdlcdev_exit(struct mgsl_struct *info);
724 #endif
725
726 /*
727 * Defines a BUS descriptor value for the PCI adapter
728 * local bus address ranges.
729 */
730
731 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
732 (0x00400020 + \
733 ((WrHold) << 30) + \
734 ((WrDly) << 28) + \
735 ((RdDly) << 26) + \
736 ((Nwdd) << 20) + \
737 ((Nwad) << 15) + \
738 ((Nxda) << 13) + \
739 ((Nrdd) << 11) + \
740 ((Nrad) << 6) )
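/*
 * Worked example of the descriptor packing above (the argument values
 * are arbitrary and only illustrate where each field lands):
 *
 *	BUS_DESCRIPTOR(1, 0, 0, 0, 0, 0, 0, 15)
 *		== 0x00400020 + (1 << 30) + (15 << 6)
 *		== 0x404003e0
 */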
741
742 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
743
744 /*
745 * Adapter diagnostic routines
746 */
747 static bool mgsl_register_test( struct mgsl_struct *info );
748 static bool mgsl_irq_test( struct mgsl_struct *info );
749 static bool mgsl_dma_test( struct mgsl_struct *info );
750 static bool mgsl_memory_test( struct mgsl_struct *info );
751 static int mgsl_adapter_test( struct mgsl_struct *info );
752
753 /*
754 * device and resource management routines
755 */
756 static int mgsl_claim_resources(struct mgsl_struct *info);
757 static void mgsl_release_resources(struct mgsl_struct *info);
758 static void mgsl_add_device(struct mgsl_struct *info);
759 static struct mgsl_struct* mgsl_allocate_device(void);
760
761 /*
762 * DMA buffer manipulation functions.
763 */
764 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
765 static bool mgsl_get_rx_frame( struct mgsl_struct *info );
766 static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
767 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
768 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
769 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
770 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
771 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
772
773 /*
774 * DMA and Shared Memory buffer allocation and formatting
775 */
776 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
777 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
778 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
779 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
780 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
781 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
782 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
783 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
784 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
785 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
786 static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
787 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
788
789 /*
790 * Bottom half interrupt handlers
791 */
792 static void mgsl_bh_handler(struct work_struct *work);
793 static void mgsl_bh_receive(struct mgsl_struct *info);
794 static void mgsl_bh_transmit(struct mgsl_struct *info);
795 static void mgsl_bh_status(struct mgsl_struct *info);
796
797 /*
798 * Interrupt handler routines and dispatch table.
799 */
800 static void mgsl_isr_null( struct mgsl_struct *info );
801 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
802 static void mgsl_isr_receive_data( struct mgsl_struct *info );
803 static void mgsl_isr_receive_status( struct mgsl_struct *info );
804 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
805 static void mgsl_isr_io_pin( struct mgsl_struct *info );
806 static void mgsl_isr_misc( struct mgsl_struct *info );
807 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
808 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
809
810 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
811
812 static isr_dispatch_func UscIsrTable[7] =
813 {
814 mgsl_isr_null,
815 mgsl_isr_misc,
816 mgsl_isr_io_pin,
817 mgsl_isr_transmit_data,
818 mgsl_isr_transmit_status,
819 mgsl_isr_receive_data,
820 mgsl_isr_receive_status
821 };
822
823 /*
824 * ioctl call handlers
825 */
826 static int tiocmget(struct tty_struct *tty);
827 static int tiocmset(struct tty_struct *tty,
828 unsigned int set, unsigned int clear);
829 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
830 __user *user_icount);
831 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
832 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
833 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
834 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
835 static int mgsl_txenable(struct mgsl_struct * info, int enable);
836 static int mgsl_txabort(struct mgsl_struct * info);
837 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
838 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
839 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
840
841 /* set non-zero on successful registration with PCI subsystem */
842 static bool pci_registered;
843
844 /*
845 * Global linked list of SyncLink devices
846 */
847 static struct mgsl_struct *mgsl_device_list;
848 static int mgsl_device_count;
849
850 /*
851 * Set this param to non-zero to load eax with the
852 * .text section address and breakpoint on module load.
853 * This is useful with gdb and the add-symbol-file command.
854 */
855 static bool break_on_load;
856
857 /*
858 * Driver major number, defaults to zero to get auto
859 * assigned major number. May be forced as module parameter.
860 */
861 static int ttymajor;
862
863 /*
864 * Array of user specified options for ISA adapters.
865 */
866 static int io[MAX_ISA_DEVICES];
867 static int irq[MAX_ISA_DEVICES];
868 static int dma[MAX_ISA_DEVICES];
869 static int debug_level;
870 static int maxframe[MAX_TOTAL_DEVICES];
871 static int txdmabufs[MAX_TOTAL_DEVICES];
872 static int txholdbufs[MAX_TOTAL_DEVICES];
873
874 module_param(break_on_load, bool, 0);
875 module_param(ttymajor, int, 0);
876 module_param_array(io, int, NULL, 0);
877 module_param_array(irq, int, NULL, 0);
878 module_param_array(dma, int, NULL, 0);
879 module_param(debug_level, int, 0);
880 module_param_array(maxframe, int, NULL, 0);
881 module_param_array(txdmabufs, int, NULL, 0);
882 module_param_array(txholdbufs, int, NULL, 0);
883
884 static char *driver_name = "SyncLink serial driver";
885 static char *driver_version = "$Revision: 4.38 $";
886
887 static int synclink_init_one (struct pci_dev *dev,
888 const struct pci_device_id *ent);
889 static void synclink_remove_one (struct pci_dev *dev);
890
891 static struct pci_device_id synclink_pci_tbl[] = {
892 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
893 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
894 { 0, }, /* terminate list */
895 };
896 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
897
898 MODULE_LICENSE("GPL");
899
900 static struct pci_driver synclink_pci_driver = {
901 .name = "synclink",
902 .id_table = synclink_pci_tbl,
903 .probe = synclink_init_one,
904 .remove = __devexit_p(synclink_remove_one),
905 };
906
907 static struct tty_driver *serial_driver;
908
909 /* number of characters left in xmit buffer before we ask for more */
910 #define WAKEUP_CHARS 256
911
912
913 static void mgsl_change_params(struct mgsl_struct *info);
914 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
915
916 /*
917 * 1st function defined in .text section. Calling this function in
918 * init_module() followed by a breakpoint allows a remote debugger
919 * (gdb) to get the .text address for the add-symbol-file command.
920 * This allows remote debugging of dynamically loadable modules.
921 */
922 static void* mgsl_get_text_ptr(void)
923 {
924 return mgsl_get_text_ptr;
925 }
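/*
 * Example debugging session using the hook above (the module file name
 * and address are illustrative; break_on_load is the module parameter
 * declared earlier in this file):
 *
 *	# insmod synclink.ko break_on_load=1
 *	...breakpoint fires with the .text address loaded in eax...
 *	(gdb) add-symbol-file synclink.o <address from eax>
 */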
926
927 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
928 char *name, const char *routine)
929 {
930 #ifdef MGSL_PARANOIA_CHECK
931 static const char *badmagic =
932 "Warning: bad magic number for mgsl struct (%s) in %s\n";
933 static const char *badinfo =
934 "Warning: null mgsl_struct for (%s) in %s\n";
935
936 if (!info) {
937 printk(badinfo, name, routine);
938 return 1;
939 }
940 if (info->magic != MGSL_MAGIC) {
941 printk(badmagic, name, routine);
942 return 1;
943 }
944 #else
945 if (!info)
946 return 1;
947 #endif
948 return 0;
949 }
950
951 /**
952 * line discipline callback wrappers
953 *
954 * The wrappers maintain line discipline references
955 * while calling into the line discipline.
956 *
957 * ldisc_receive_buf - pass receive data to line discipline
958 */
959
960 static void ldisc_receive_buf(struct tty_struct *tty,
961 const __u8 *data, char *flags, int count)
962 {
963 struct tty_ldisc *ld;
964 if (!tty)
965 return;
966 ld = tty_ldisc_ref(tty);
967 if (ld) {
968 if (ld->ops->receive_buf)
969 ld->ops->receive_buf(tty, data, flags, count);
970 tty_ldisc_deref(ld);
971 }
972 }
973
974 /* mgsl_stop() throttle (stop) transmitter
975 *
976 * Arguments: tty pointer to tty info structure
977 * Return Value: None
978 */
979 static void mgsl_stop(struct tty_struct *tty)
980 {
981 struct mgsl_struct *info = tty->driver_data;
982 unsigned long flags;
983
984 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
985 return;
986
987 if ( debug_level >= DEBUG_LEVEL_INFO )
988 printk("mgsl_stop(%s)\n",info->device_name);
989
990 spin_lock_irqsave(&info->irq_spinlock,flags);
991 if (info->tx_enabled)
992 usc_stop_transmitter(info);
993 spin_unlock_irqrestore(&info->irq_spinlock,flags);
994
995 } /* end of mgsl_stop() */
996
997 /* mgsl_start() release (start) transmitter
998 *
999 * Arguments: tty pointer to tty info structure
1000 * Return Value: None
1001 */
1002 static void mgsl_start(struct tty_struct *tty)
1003 {
1004 struct mgsl_struct *info = tty->driver_data;
1005 unsigned long flags;
1006
1007 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1008 return;
1009
1010 if ( debug_level >= DEBUG_LEVEL_INFO )
1011 printk("mgsl_start(%s)\n",info->device_name);
1012
1013 spin_lock_irqsave(&info->irq_spinlock,flags);
1014 if (!info->tx_enabled)
1015 usc_start_transmitter(info);
1016 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1017
1018 } /* end of mgsl_start() */
1019
1020 /*
1021 * Bottom half work queue access functions
1022 */
1023
1024 /* mgsl_bh_action() Return next bottom half action to perform.
1025 * Return Value: BH action code or 0 if nothing to do.
1026 */
1027 static int mgsl_bh_action(struct mgsl_struct *info)
1028 {
1029 unsigned long flags;
1030 int rc = 0;
1031
1032 spin_lock_irqsave(&info->irq_spinlock,flags);
1033
1034 if (info->pending_bh & BH_RECEIVE) {
1035 info->pending_bh &= ~BH_RECEIVE;
1036 rc = BH_RECEIVE;
1037 } else if (info->pending_bh & BH_TRANSMIT) {
1038 info->pending_bh &= ~BH_TRANSMIT;
1039 rc = BH_TRANSMIT;
1040 } else if (info->pending_bh & BH_STATUS) {
1041 info->pending_bh &= ~BH_STATUS;
1042 rc = BH_STATUS;
1043 }
1044
1045 if (!rc) {
1046 /* Mark BH routine as complete */
1047 info->bh_running = false;
1048 info->bh_requested = false;
1049 }
1050
1051 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1052
1053 return rc;
1054 }
1055
1056 /*
1057 * Perform bottom half processing of work items queued by ISR.
1058 */
1059 static void mgsl_bh_handler(struct work_struct *work)
1060 {
1061 struct mgsl_struct *info =
1062 container_of(work, struct mgsl_struct, task);
1063 int action;
1064
1065 if (!info)
1066 return;
1067
1068 if ( debug_level >= DEBUG_LEVEL_BH )
1069 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1070 __FILE__,__LINE__,info->device_name);
1071
1072 info->bh_running = true;
1073
1074 while((action = mgsl_bh_action(info)) != 0) {
1075
1076 /* Process work item */
1077 if ( debug_level >= DEBUG_LEVEL_BH )
1078 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1079 __FILE__,__LINE__,action);
1080
1081 switch (action) {
1082
1083 case BH_RECEIVE:
1084 mgsl_bh_receive(info);
1085 break;
1086 case BH_TRANSMIT:
1087 mgsl_bh_transmit(info);
1088 break;
1089 case BH_STATUS:
1090 mgsl_bh_status(info);
1091 break;
1092 default:
1093 /* unknown work item ID */
1094 printk("Unknown work item ID=%08X!\n", action);
1095 break;
1096 }
1097 }
1098
1099 if ( debug_level >= DEBUG_LEVEL_BH )
1100 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1101 __FILE__,__LINE__,info->device_name);
1102 }
1103
1104 static void mgsl_bh_receive(struct mgsl_struct *info)
1105 {
1106 bool (*get_rx_frame)(struct mgsl_struct *info) =
1107 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1108
1109 if ( debug_level >= DEBUG_LEVEL_BH )
1110 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1111 __FILE__,__LINE__,info->device_name);
1112
1113 do
1114 {
1115 if (info->rx_rcc_underrun) {
1116 unsigned long flags;
1117 spin_lock_irqsave(&info->irq_spinlock,flags);
1118 usc_start_receiver(info);
1119 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1120 return;
1121 }
1122 } while(get_rx_frame(info));
1123 }
1124
1125 static void mgsl_bh_transmit(struct mgsl_struct *info)
1126 {
1127 struct tty_struct *tty = info->port.tty;
1128 unsigned long flags;
1129
1130 if ( debug_level >= DEBUG_LEVEL_BH )
1131 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1132 __FILE__,__LINE__,info->device_name);
1133
1134 if (tty)
1135 tty_wakeup(tty);
1136
1137 /* if transmitter idle and loopmode_send_done_requested
1138 * then start echoing RxD to TxD
1139 */
1140 spin_lock_irqsave(&info->irq_spinlock,flags);
1141 if ( !info->tx_active && info->loopmode_send_done_requested )
1142 usc_loopmode_send_done( info );
1143 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1144 }
1145
1146 static void mgsl_bh_status(struct mgsl_struct *info)
1147 {
1148 if ( debug_level >= DEBUG_LEVEL_BH )
1149 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1150 __FILE__,__LINE__,info->device_name);
1151
1152 info->ri_chkcount = 0;
1153 info->dsr_chkcount = 0;
1154 info->dcd_chkcount = 0;
1155 info->cts_chkcount = 0;
1156 }
1157
1158 /* mgsl_isr_receive_status()
1159 *
1160 * Service a receive status interrupt. The type of status
1161 * interrupt is indicated by the state of the RCSR.
1162 * This is only used for HDLC mode.
1163 *
1164 * Arguments: info pointer to device instance data
1165 * Return Value: None
1166 */
1167 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1168 {
1169 u16 status = usc_InReg( info, RCSR );
1170
1171 if ( debug_level >= DEBUG_LEVEL_ISR )
1172 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1173 __FILE__,__LINE__,status);
1174
1175 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1176 info->loopmode_insert_requested &&
1177 usc_loopmode_active(info) )
1178 {
1179 ++info->icount.rxabort;
1180 info->loopmode_insert_requested = false;
1181
1182 /* clear CMR:13 to start echoing RxD to TxD */
1183 info->cmr_value &= ~BIT13;
1184 usc_OutReg(info, CMR, info->cmr_value);
1185
1186 /* disable received abort irq (no longer required) */
1187 usc_OutReg(info, RICR,
1188 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1189 }
1190
1191 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1192 if (status & RXSTATUS_EXITED_HUNT)
1193 info->icount.exithunt++;
1194 if (status & RXSTATUS_IDLE_RECEIVED)
1195 info->icount.rxidle++;
1196 wake_up_interruptible(&info->event_wait_q);
1197 }
1198
1199 if (status & RXSTATUS_OVERRUN){
1200 info->icount.rxover++;
1201 usc_process_rxoverrun_sync( info );
1202 }
1203
1204 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1205 usc_UnlatchRxstatusBits( info, status );
1206
1207 } /* end of mgsl_isr_receive_status() */
1208
1209 /* mgsl_isr_transmit_status()
1210 *
1211 * Service a transmit status interrupt
1212 * HDLC mode: end of transmit frame
1213 * Async mode: all data is sent
1214 * Transmit status is indicated by bits in the TCSR.
1215 *
1216 * Arguments: info pointer to device instance data
1217 * Return Value: None
1218 */
1219 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1220 {
1221 u16 status = usc_InReg( info, TCSR );
1222
1223 if ( debug_level >= DEBUG_LEVEL_ISR )
1224 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1225 __FILE__,__LINE__,status);
1226
1227 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1228 usc_UnlatchTxstatusBits( info, status );
1229
1230 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1231 {
1232 /* finished sending HDLC abort. This may leave */
1233 /* the TxFifo with data from the aborted frame */
1234 /* so purge the TxFifo. Also shutdown the DMA */
1235 /* channel in case there is data remaining in */
1236 /* the DMA buffer */
1237 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1238 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1239 }
1240
1241 if ( status & TXSTATUS_EOF_SENT )
1242 info->icount.txok++;
1243 else if ( status & TXSTATUS_UNDERRUN )
1244 info->icount.txunder++;
1245 else if ( status & TXSTATUS_ABORT_SENT )
1246 info->icount.txabort++;
1247 else
1248 info->icount.txunder++;
1249
1250 info->tx_active = false;
1251 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1252 del_timer(&info->tx_timer);
1253
1254 if ( info->drop_rts_on_tx_done ) {
1255 usc_get_serial_signals( info );
1256 if ( info->serial_signals & SerialSignal_RTS ) {
1257 info->serial_signals &= ~SerialSignal_RTS;
1258 usc_set_serial_signals( info );
1259 }
1260 info->drop_rts_on_tx_done = false;
1261 }
1262
1263 #if SYNCLINK_GENERIC_HDLC
1264 if (info->netcount)
1265 hdlcdev_tx_done(info);
1266 else
1267 #endif
1268 {
1269 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1270 usc_stop_transmitter(info);
1271 return;
1272 }
1273 info->pending_bh |= BH_TRANSMIT;
1274 }
1275
1276 } /* end of mgsl_isr_transmit_status() */
1277
1278 /* mgsl_isr_io_pin()
1279 *
1280 * Service an Input/Output pin interrupt. The type of
1281 * interrupt is indicated by bits in the MISR
1282 *
1283 * Arguments: info pointer to device instance data
1284 * Return Value: None
1285 */
1286 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1287 {
1288 struct mgsl_icount *icount;
1289 u16 status = usc_InReg( info, MISR );
1290
1291 if ( debug_level >= DEBUG_LEVEL_ISR )
1292 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1293 __FILE__,__LINE__,status);
1294
1295 usc_ClearIrqPendingBits( info, IO_PIN );
1296 usc_UnlatchIostatusBits( info, status );
1297
1298 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1299 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1300 icount = &info->icount;
1301 /* update input line counters */
1302 if (status & MISCSTATUS_RI_LATCHED) {
1303 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1304 usc_DisablestatusIrqs(info,SICR_RI);
1305 icount->rng++;
1306 if ( status & MISCSTATUS_RI )
1307 info->input_signal_events.ri_up++;
1308 else
1309 info->input_signal_events.ri_down++;
1310 }
1311 if (status & MISCSTATUS_DSR_LATCHED) {
1312 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1313 usc_DisablestatusIrqs(info,SICR_DSR);
1314 icount->dsr++;
1315 if ( status & MISCSTATUS_DSR )
1316 info->input_signal_events.dsr_up++;
1317 else
1318 info->input_signal_events.dsr_down++;
1319 }
1320 if (status & MISCSTATUS_DCD_LATCHED) {
1321 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1322 usc_DisablestatusIrqs(info,SICR_DCD);
1323 icount->dcd++;
1324 if (status & MISCSTATUS_DCD) {
1325 info->input_signal_events.dcd_up++;
1326 } else
1327 info->input_signal_events.dcd_down++;
1328 #if SYNCLINK_GENERIC_HDLC
1329 if (info->netcount) {
1330 if (status & MISCSTATUS_DCD)
1331 netif_carrier_on(info->netdev);
1332 else
1333 netif_carrier_off(info->netdev);
1334 }
1335 #endif
1336 }
1337 if (status & MISCSTATUS_CTS_LATCHED)
1338 {
1339 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1340 usc_DisablestatusIrqs(info,SICR_CTS);
1341 icount->cts++;
1342 if ( status & MISCSTATUS_CTS )
1343 info->input_signal_events.cts_up++;
1344 else
1345 info->input_signal_events.cts_down++;
1346 }
1347 wake_up_interruptible(&info->status_event_wait_q);
1348 wake_up_interruptible(&info->event_wait_q);
1349
1350 if ( (info->port.flags & ASYNC_CHECK_CD) &&
1351 (status & MISCSTATUS_DCD_LATCHED) ) {
1352 if ( debug_level >= DEBUG_LEVEL_ISR )
1353 printk("%s CD now %s...", info->device_name,
1354 (status & MISCSTATUS_DCD) ? "on" : "off");
1355 if (status & MISCSTATUS_DCD)
1356 wake_up_interruptible(&info->port.open_wait);
1357 else {
1358 if ( debug_level >= DEBUG_LEVEL_ISR )
1359 printk("doing serial hangup...");
1360 if (info->port.tty)
1361 tty_hangup(info->port.tty);
1362 }
1363 }
1364
1365 if ( (info->port.flags & ASYNC_CTS_FLOW) &&
1366 (status & MISCSTATUS_CTS_LATCHED) ) {
1367 if (info->port.tty->hw_stopped) {
1368 if (status & MISCSTATUS_CTS) {
1369 if ( debug_level >= DEBUG_LEVEL_ISR )
1370 printk("CTS tx start...");
1371 if (info->port.tty)
1372 info->port.tty->hw_stopped = 0;
1373 usc_start_transmitter(info);
1374 info->pending_bh |= BH_TRANSMIT;
1375 return;
1376 }
1377 } else {
1378 if (!(status & MISCSTATUS_CTS)) {
1379 if ( debug_level >= DEBUG_LEVEL_ISR )
1380 printk("CTS tx stop...");
1381 if (info->port.tty)
1382 info->port.tty->hw_stopped = 1;
1383 usc_stop_transmitter(info);
1384 }
1385 }
1386 }
1387 }
1388
1389 info->pending_bh |= BH_STATUS;
1390
1391 /* for diagnostics set IRQ flag */
1392 if ( status & MISCSTATUS_TXC_LATCHED ){
1393 usc_OutReg( info, SICR,
1394 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1395 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1396 info->irq_occurred = true;
1397 }
1398
1399 } /* end of mgsl_isr_io_pin() */
1400
1401 /* mgsl_isr_transmit_data()
1402 *
1403 * Service a transmit data interrupt (async mode only).
1404 *
1405 * Arguments: info pointer to device instance data
1406 * Return Value: None
1407 */
1408 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1409 {
1410 if ( debug_level >= DEBUG_LEVEL_ISR )
1411 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1412 __FILE__,__LINE__,info->xmit_cnt);
1413
1414 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1415
1416 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1417 usc_stop_transmitter(info);
1418 return;
1419 }
1420
1421 if ( info->xmit_cnt )
1422 usc_load_txfifo( info );
1423 else
1424 info->tx_active = false;
1425
1426 if (info->xmit_cnt < WAKEUP_CHARS)
1427 info->pending_bh |= BH_TRANSMIT;
1428
1429 } /* end of mgsl_isr_transmit_data() */
1430
1431 /* mgsl_isr_receive_data()
1432 *
1433 * Service a receive data interrupt. This occurs
1434 * when operating in asynchronous interrupt transfer mode.
1435 * The receive data FIFO is flushed to the receive data buffers.
1436 *
1437 * Arguments: info pointer to device instance data
1438 * Return Value: None
1439 */
1440 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1441 {
1442 int Fifocount;
1443 u16 status;
1444 int work = 0;
1445 unsigned char DataByte;
1446 struct tty_struct *tty = info->port.tty;
1447 struct mgsl_icount *icount = &info->icount;
1448
1449 if ( debug_level >= DEBUG_LEVEL_ISR )
1450 printk("%s(%d):mgsl_isr_receive_data\n",
1451 __FILE__,__LINE__);
1452
1453 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1454
1455 /* select FIFO status for RICR readback */
1456 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1457
1458 /* clear the Wordstatus bit so that status readback */
1459 /* only reflects the status of this byte */
1460 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1461
1462 /* flush the receive FIFO */
1463
1464 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1465 int flag;
1466
1467 /* read one byte from RxFIFO */
1468 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1469 info->io_base + CCAR );
1470 DataByte = inb( info->io_base + CCAR );
1471
1472 /* get the status of the received byte */
1473 status = usc_InReg(info, RCSR);
1474 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1475 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1476 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1477
1478 icount->rx++;
1479
1480 flag = 0;
1481 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1482 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1483 printk("rxerr=%04X\n",status);
1484 /* update error statistics */
1485 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1486 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1487 icount->brk++;
1488 } else if (status & RXSTATUS_PARITY_ERROR)
1489 icount->parity++;
1490 else if (status & RXSTATUS_FRAMING_ERROR)
1491 icount->frame++;
1492 else if (status & RXSTATUS_OVERRUN) {
1493 /* must issue purge fifo cmd before */
1494 /* 16C32 accepts more receive chars */
1495 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1496 icount->overrun++;
1497 }
1498
1499 /* discard char if tty control flags say so */
1500 if (status & info->ignore_status_mask)
1501 continue;
1502
1503 status &= info->read_status_mask;
1504
1505 if (status & RXSTATUS_BREAK_RECEIVED) {
1506 flag = TTY_BREAK;
1507 if (info->port.flags & ASYNC_SAK)
1508 do_SAK(tty);
1509 } else if (status & RXSTATUS_PARITY_ERROR)
1510 flag = TTY_PARITY;
1511 else if (status & RXSTATUS_FRAMING_ERROR)
1512 flag = TTY_FRAME;
1513 } /* end of if (error) */
1514 work += tty_insert_flip_char(tty, DataByte, flag);
1515 if (status & RXSTATUS_OVERRUN) {
1516 /* Overrun is special, since it's
1517 * reported immediately, and doesn't
1518 * affect the current character
1519 */
1520 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1521 }
1522 }
1523
1524 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1525 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1526 __FILE__,__LINE__,icount->rx,icount->brk,
1527 icount->parity,icount->frame,icount->overrun);
1528 }
1529
1530 if(work)
1531 tty_flip_buffer_push(tty);
1532 }
1533
1534 /* mgsl_isr_misc()
1535 *
1536 * Service a miscellaneous interrupt source.
1537 *
1538 * Arguments: info pointer to device extension (instance data)
1539 * Return Value: None
1540 */
1541 static void mgsl_isr_misc( struct mgsl_struct *info )
1542 {
1543 u16 status = usc_InReg( info, MISR );
1544
1545 if ( debug_level >= DEBUG_LEVEL_ISR )
1546 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1547 __FILE__,__LINE__,status);
1548
1549 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1550 (info->params.mode == MGSL_MODE_HDLC)) {
1551
1552 /* turn off receiver and rx DMA */
1553 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1554 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1555 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1556 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1557 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1558
1559 /* schedule BH handler to restart receiver */
1560 info->pending_bh |= BH_RECEIVE;
1561 info->rx_rcc_underrun = true;
1562 }
1563
1564 usc_ClearIrqPendingBits( info, MISC );
1565 usc_UnlatchMiscstatusBits( info, status );
1566
1567 } /* end of mgsl_isr_misc() */
1568
1569 /* mgsl_isr_null()
1570 *
1571 * Services undefined interrupt vectors from the
1572 * USC. (hence this function SHOULD never be called)
1573 *
1574 * Arguments: info pointer to device extension (instance data)
1575 * Return Value: None
1576 */
1577 static void mgsl_isr_null( struct mgsl_struct *info )
1578 {
1579
1580 } /* end of mgsl_isr_null() */
1581
1582 /* mgsl_isr_receive_dma()
1583 *
1584 * Service a receive DMA channel interrupt.
1585 * For this driver there are two sources of receive DMA interrupts
1586 * as identified in the Receive DMA mode Register (RDMR):
1587 *
1588 * BIT3 EOA/EOL End of List, all receive buffers in receive
1589 * buffer list have been filled (no more free buffers
1590 * available). The DMA controller has shut down.
1591 *
1592 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1593 * DMA buffer is terminated in response to completion
1594 * of a good frame or a frame with errors. The status
1595 * of the frame is stored in the buffer entry in the
1596 * list of receive buffer entries.
1597 *
1598 * Arguments: info pointer to device instance data
1599 * Return Value: None
1600 */
1601 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1602 {
1603 u16 status;
1604
1605 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1606 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1607
1608 /* Read the receive DMA status to identify interrupt type. */
1609 /* This also clears the status bits. */
1610 status = usc_InDmaReg( info, RDMR );
1611
1612 if ( debug_level >= DEBUG_LEVEL_ISR )
1613 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1614 __FILE__,__LINE__,info->device_name,status);
1615
1616 info->pending_bh |= BH_RECEIVE;
1617
1618 if ( status & BIT3 ) {
1619 info->rx_overflow = true;
1620 info->icount.buf_overrun++;
1621 }
1622
1623 } /* end of mgsl_isr_receive_dma() */
1624
1625 /* mgsl_isr_transmit_dma()
1626 *
1627 * This function services a transmit DMA channel interrupt.
1628 *
1629 * For this driver there is one source of transmit DMA interrupts
1630 * as identified in the Transmit DMA Mode Register (TDMR):
1631 *
1632 * BIT2 EOB End of Buffer. This interrupt occurs when a
1633 * transmit DMA buffer has been emptied.
1634 *
1635 * The driver maintains enough transmit DMA buffers to hold at least
1636 * one max frame size transmit frame. When operating in a buffered
1637 * transmit mode, there may be enough transmit DMA buffers to hold at
1638 * least two or more max frame size frames. On an EOB condition,
1639 * determine if there are any queued transmit buffers and copy into
1640 * transmit DMA buffers if we have room.
1641 *
1642 * Arguments: info pointer to device instance data
1643 * Return Value: None
1644 */
1645 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1646 {
1647 u16 status;
1648
1649 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1650 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1651
1652 /* Read the transmit DMA status to identify interrupt type. */
1653 /* This also clears the status bits. */
1654
1655 status = usc_InDmaReg( info, TDMR );
1656
1657 if ( debug_level >= DEBUG_LEVEL_ISR )
1658 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1659 __FILE__,__LINE__,info->device_name,status);
1660
1661 if ( status & BIT2 ) {
1662 --info->tx_dma_buffers_used;
1663
1664 /* if there are transmit frames queued,
1665 * try to load the next one
1666 */
1667 if ( load_next_tx_holding_buffer(info) ) {
1668 /* if call returns non-zero value, we have
1669 * at least one free tx holding buffer
1670 */
1671 info->pending_bh |= BH_TRANSMIT;
1672 }
1673 }
1674
1675 } /* end of mgsl_isr_transmit_dma() */
1676
1677 /* mgsl_interrupt()
1678 *
1679 * Interrupt service routine entry point.
1680 *
1681 * Arguments:
1682 *
1683 * irq interrupt number that caused interrupt
1684 * dev_id device ID supplied during interrupt registration
1685 *
1686  * Return Value: IRQ_HANDLED
1687 */
1688 static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1689 {
1690 struct mgsl_struct *info = dev_id;
1691 u16 UscVector;
1692 u16 DmaVector;
1693
1694 if ( debug_level >= DEBUG_LEVEL_ISR )
1695 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1696 __FILE__, __LINE__, info->irq_level);
1697
1698 spin_lock(&info->irq_spinlock);
1699
1700 for(;;) {
1701 /* Read the interrupt vectors from hardware. */
1702 UscVector = usc_InReg(info, IVR) >> 9;
1703 DmaVector = usc_InDmaReg(info, DIVR);
1704
1705 if ( debug_level >= DEBUG_LEVEL_ISR )
1706 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1707 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1708
1709 if ( !UscVector && !DmaVector )
1710 break;
1711
1712 /* Dispatch interrupt vector */
1713 if ( UscVector )
1714 (*UscIsrTable[UscVector])(info);
1715 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1716 mgsl_isr_transmit_dma(info);
1717 else
1718 mgsl_isr_receive_dma(info);
1719
1720 if ( info->isr_overflow ) {
1721 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1722 __FILE__, __LINE__, info->device_name, info->irq_level);
1723 usc_DisableMasterIrqBit(info);
1724 usc_DisableDmaInterrupts(info,DICR_MASTER);
1725 break;
1726 }
1727 }
1728
1729 /* Request bottom half processing if there's something
1730 * for it to do and the bh is not already running
1731 */
1732
1733 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1734 if ( debug_level >= DEBUG_LEVEL_ISR )
1735 printk("%s(%d):%s queueing bh task.\n",
1736 __FILE__,__LINE__,info->device_name);
1737 schedule_work(&info->task);
1738 info->bh_requested = true;
1739 }
1740
1741 spin_unlock(&info->irq_spinlock);
1742
1743 if ( debug_level >= DEBUG_LEVEL_ISR )
1744 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1745 __FILE__, __LINE__, info->irq_level);
1746
1747 return IRQ_HANDLED;
1748 } /* end of mgsl_interrupt() */
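/*
 * A minimal sketch, not taken from this driver, of how an interrupt handler
 * with this signature is typically registered, assuming dev_id points at the
 * device instance so the handler can recover it as above.  The IRQF_SHARED
 * flag and the "synclink" name are illustrative assumptions; the driver's
 * actual registration is performed in its resource claiming code, which is
 * not shown here.
 */
#if 0	/* illustrative sketch only */
static int example_hook_irq(struct mgsl_struct *info)
{
	int rc;

	rc = request_irq(info->irq_level, mgsl_interrupt, IRQF_SHARED,
			 "synclink", info);
	if (rc)
		printk(KERN_ERR "can't request IRQ %d (rc=%d)\n",
		       info->irq_level, rc);
	/* release later with the same dev_id: free_irq(info->irq_level, info) */
	return rc;
}
#endif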
1749
1750 /* startup()
1751 *
1752 * Initialize and start device.
1753 *
1754 * Arguments: info pointer to device instance data
1755 * Return Value: 0 if success, otherwise error code
1756 */
1757 static int startup(struct mgsl_struct * info)
1758 {
1759 int retval = 0;
1760
1761 if ( debug_level >= DEBUG_LEVEL_INFO )
1762 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1763
1764 if (info->port.flags & ASYNC_INITIALIZED)
1765 return 0;
1766
1767 if (!info->xmit_buf) {
1768 /* allocate a page of memory for a transmit buffer */
1769 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1770 if (!info->xmit_buf) {
1771 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1772 __FILE__,__LINE__,info->device_name);
1773 return -ENOMEM;
1774 }
1775 }
1776
1777 info->pending_bh = 0;
1778
1779 memset(&info->icount, 0, sizeof(info->icount));
1780
1781 setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
1782
1783 /* Allocate and claim adapter resources */
1784 retval = mgsl_claim_resources(info);
1785
1786 /* perform existence check and diagnostics */
1787 if ( !retval )
1788 retval = mgsl_adapter_test(info);
1789
1790 if ( retval ) {
1791 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1792 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1793 mgsl_release_resources(info);
1794 return retval;
1795 }
1796
1797 /* program hardware for current parameters */
1798 mgsl_change_params(info);
1799
1800 if (info->port.tty)
1801 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1802
1803 info->port.flags |= ASYNC_INITIALIZED;
1804
1805 return 0;
1806
1807 } /* end of startup() */
1808
1809 /* shutdown()
1810 *
1811 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1812 *
1813 * Arguments: info pointer to device instance data
1814 * Return Value: None
1815 */
1816 static void shutdown(struct mgsl_struct * info)
1817 {
1818 unsigned long flags;
1819
1820 if (!(info->port.flags & ASYNC_INITIALIZED))
1821 return;
1822
1823 if (debug_level >= DEBUG_LEVEL_INFO)
1824 printk("%s(%d):mgsl_shutdown(%s)\n",
1825 __FILE__,__LINE__, info->device_name );
1826
1827 /* clear status wait queue because status changes */
1828 /* can't happen after shutting down the hardware */
1829 wake_up_interruptible(&info->status_event_wait_q);
1830 wake_up_interruptible(&info->event_wait_q);
1831
1832 del_timer_sync(&info->tx_timer);
1833
1834 if (info->xmit_buf) {
1835 free_page((unsigned long) info->xmit_buf);
1836 info->xmit_buf = NULL;
1837 }
1838
1839 spin_lock_irqsave(&info->irq_spinlock,flags);
1840 usc_DisableMasterIrqBit(info);
1841 usc_stop_receiver(info);
1842 usc_stop_transmitter(info);
1843 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1844 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1845 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1846
1847 /* Disable DMAEN (Port 7, Bit 14) */
1848 /* This disconnects the DMA request signal from the ISA bus */
1849 /* on the ISA adapter. This has no effect for the PCI adapter */
1850 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1851
1852 /* Disable INTEN (Port 6, Bit12) */
1853 /* This disconnects the IRQ request signal to the ISA bus */
1854 /* on the ISA adapter. This has no effect for the PCI adapter */
1855 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1856
1857 if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
1858 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1859 usc_set_serial_signals(info);
1860 }
1861
1862 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1863
1864 mgsl_release_resources(info);
1865
1866 if (info->port.tty)
1867 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1868
1869 info->port.flags &= ~ASYNC_INITIALIZED;
1870
1871 } /* end of shutdown() */
1872
1873 static void mgsl_program_hw(struct mgsl_struct *info)
1874 {
1875 unsigned long flags;
1876
1877 spin_lock_irqsave(&info->irq_spinlock,flags);
1878
1879 usc_stop_receiver(info);
1880 usc_stop_transmitter(info);
1881 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1882
1883 if (info->params.mode == MGSL_MODE_HDLC ||
1884 info->params.mode == MGSL_MODE_RAW ||
1885 info->netcount)
1886 usc_set_sync_mode(info);
1887 else
1888 usc_set_async_mode(info);
1889
1890 usc_set_serial_signals(info);
1891
1892 info->dcd_chkcount = 0;
1893 info->cts_chkcount = 0;
1894 info->ri_chkcount = 0;
1895 info->dsr_chkcount = 0;
1896
1897 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1898 usc_EnableInterrupts(info, IO_PIN);
1899 usc_get_serial_signals(info);
1900
1901 if (info->netcount || info->port.tty->termios->c_cflag & CREAD)
1902 usc_start_receiver(info);
1903
1904 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1905 }
1906
1907 /* Reconfigure adapter based on new parameters
1908 */
1909 static void mgsl_change_params(struct mgsl_struct *info)
1910 {
1911 unsigned cflag;
1912 int bits_per_char;
1913
1914 if (!info->port.tty || !info->port.tty->termios)
1915 return;
1916
1917 if (debug_level >= DEBUG_LEVEL_INFO)
1918 printk("%s(%d):mgsl_change_params(%s)\n",
1919 __FILE__,__LINE__, info->device_name );
1920
1921 cflag = info->port.tty->termios->c_cflag;
1922
1923 /* if B0 rate (hangup) specified then negate DTR and RTS */
1924 /* otherwise assert DTR and RTS */
1925 if (cflag & CBAUD)
1926 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1927 else
1928 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1929
1930 /* byte size and parity */
1931
1932 switch (cflag & CSIZE) {
1933 case CS5: info->params.data_bits = 5; break;
1934 case CS6: info->params.data_bits = 6; break;
1935 case CS7: info->params.data_bits = 7; break;
1936 case CS8: info->params.data_bits = 8; break;
1937 /* Never happens, but GCC is too dumb to figure it out */
1938 default: info->params.data_bits = 7; break;
1939 }
1940
1941 if (cflag & CSTOPB)
1942 info->params.stop_bits = 2;
1943 else
1944 info->params.stop_bits = 1;
1945
1946 info->params.parity = ASYNC_PARITY_NONE;
1947 if (cflag & PARENB) {
1948 if (cflag & PARODD)
1949 info->params.parity = ASYNC_PARITY_ODD;
1950 else
1951 info->params.parity = ASYNC_PARITY_EVEN;
1952 #ifdef CMSPAR
1953 if (cflag & CMSPAR)
1954 info->params.parity = ASYNC_PARITY_SPACE;
1955 #endif
1956 }
1957
1958 /* calculate number of jiffies to transmit a full
1959 * FIFO (32 bytes) at specified data rate
1960 */
1961 bits_per_char = info->params.data_bits +
1962 info->params.stop_bits + 1;
1963
1964 /* if port data rate is set to 460800 or less then
1965 * allow tty settings to override, otherwise keep the
1966 * current data rate.
1967 */
1968 if (info->params.data_rate <= 460800)
1969 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1970
1971 if ( info->params.data_rate ) {
1972 info->timeout = (32*HZ*bits_per_char) /
1973 info->params.data_rate;
1974 }
1975 info->timeout += HZ/50; /* Add .02 seconds of slop */
1976
1977 if (cflag & CRTSCTS)
1978 info->port.flags |= ASYNC_CTS_FLOW;
1979 else
1980 info->port.flags &= ~ASYNC_CTS_FLOW;
1981
1982 if (cflag & CLOCAL)
1983 info->port.flags &= ~ASYNC_CHECK_CD;
1984 else
1985 info->port.flags |= ASYNC_CHECK_CD;
1986
1987 /* process tty input control flags */
1988
1989 info->read_status_mask = RXSTATUS_OVERRUN;
1990 if (I_INPCK(info->port.tty))
1991 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1992 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1993 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1994
1995 if (I_IGNPAR(info->port.tty))
1996 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1997 if (I_IGNBRK(info->port.tty)) {
1998 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1999 /* If ignoring parity and break indicators, ignore
2000 * overruns too. (For real raw support).
2001 */
2002 if (I_IGNPAR(info->port.tty))
2003 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2004 }
2005
2006 mgsl_program_hw(info);
2007
2008 } /* end of mgsl_change_params() */
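/*
 * A minimal user-space sketch of the termios settings that feed this path
 * for asynchronous use.  This is not part of the driver; the node name
 * /dev/ttySL0 and the 38400 bps rate are assumptions for illustration.
 */
#if 0	/* illustrative user-space sketch only */
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int example_async_setup(void)
{
	struct termios tio;
	int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	tcgetattr(fd, &tio);
	cfmakeraw(&tio);			/* raw 8-bit data path */
	tio.c_cflag &= ~(CSIZE | PARENB | CSTOPB);
	tio.c_cflag |= CS8 | CRTSCTS;		/* 8N1 with RTS/CTS flow control */
	cfsetispeed(&tio, B38400);
	cfsetospeed(&tio, B38400);
	tcsetattr(fd, TCSANOW, &tio);		/* reaches mgsl_set_termios() */
	return fd;
}
#endif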
2009
2010 /* mgsl_put_char()
2011 *
2012 * Add a character to the transmit buffer.
2013 *
2014 * Arguments: tty pointer to tty information structure
2015 * ch character to add to transmit buffer
2016 *
2017  * Return Value: 1 if the character was queued, 0 otherwise
2018 */
2019 static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2020 {
2021 struct mgsl_struct *info = tty->driver_data;
2022 unsigned long flags;
2023 int ret = 0;
2024
2025 if (debug_level >= DEBUG_LEVEL_INFO) {
2026 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2027 __FILE__, __LINE__, ch, info->device_name);
2028 }
2029
2030 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2031 return 0;
2032
2033 if (!info->xmit_buf)
2034 return 0;
2035
2036 spin_lock_irqsave(&info->irq_spinlock, flags);
2037
2038 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2039 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2040 info->xmit_buf[info->xmit_head++] = ch;
2041 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2042 info->xmit_cnt++;
2043 ret = 1;
2044 }
2045 }
2046 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2047 return ret;
2048
2049 } /* end of mgsl_put_char() */
2050
2051 /* mgsl_flush_chars()
2052 *
2053 * Enable transmitter so remaining characters in the
2054 * transmit buffer are sent.
2055 *
2056 * Arguments: tty pointer to tty information structure
2057 * Return Value: None
2058 */
2059 static void mgsl_flush_chars(struct tty_struct *tty)
2060 {
2061 struct mgsl_struct *info = tty->driver_data;
2062 unsigned long flags;
2063
2064 if ( debug_level >= DEBUG_LEVEL_INFO )
2065 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2066 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2067
2068 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2069 return;
2070
2071 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2072 !info->xmit_buf)
2073 return;
2074
2075 if ( debug_level >= DEBUG_LEVEL_INFO )
2076 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2077 __FILE__,__LINE__,info->device_name );
2078
2079 spin_lock_irqsave(&info->irq_spinlock,flags);
2080
2081 if (!info->tx_active) {
2082 if ( (info->params.mode == MGSL_MODE_HDLC ||
2083 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2084 /* operating in synchronous (frame oriented) mode */
2085 /* copy data from circular xmit_buf to */
2086 /* transmit DMA buffer. */
2087 mgsl_load_tx_dma_buffer(info,
2088 info->xmit_buf,info->xmit_cnt);
2089 }
2090 usc_start_transmitter(info);
2091 }
2092
2093 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2094
2095 } /* end of mgsl_flush_chars() */
2096
2097 /* mgsl_write()
2098 *
2099 * Send a block of data
2100 *
2101 * Arguments:
2102 *
2103 * tty pointer to tty information structure
2104 * buf pointer to buffer containing send data
2105 * count size of send data in bytes
2106 *
2107 * Return Value: number of characters written
2108 */
2109 static int mgsl_write(struct tty_struct * tty,
2110 const unsigned char *buf, int count)
2111 {
2112 int c, ret = 0;
2113 struct mgsl_struct *info = tty->driver_data;
2114 unsigned long flags;
2115
2116 if ( debug_level >= DEBUG_LEVEL_INFO )
2117 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2118 __FILE__,__LINE__,info->device_name,count);
2119
2120 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2121 goto cleanup;
2122
2123 if (!info->xmit_buf)
2124 goto cleanup;
2125
2126 if ( info->params.mode == MGSL_MODE_HDLC ||
2127 info->params.mode == MGSL_MODE_RAW ) {
2128 /* operating in synchronous (frame oriented) mode */
2130 if (info->tx_active) {
2131
2132 if ( info->params.mode == MGSL_MODE_HDLC ) {
2133 ret = 0;
2134 goto cleanup;
2135 }
2136 /* transmitter is actively sending data -
2137 * if we have multiple transmit dma and
2138 * holding buffers, attempt to queue this
2139 * frame for transmission at a later time.
2140 */
2141 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2142 /* no tx holding buffers available */
2143 ret = 0;
2144 goto cleanup;
2145 }
2146
2147 /* queue transmit frame request */
2148 ret = count;
2149 save_tx_buffer_request(info,buf,count);
2150
2151 /* if we have sufficient tx dma buffers,
2152 * load the next buffered tx request
2153 */
2154 spin_lock_irqsave(&info->irq_spinlock,flags);
2155 load_next_tx_holding_buffer(info);
2156 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2157 goto cleanup;
2158 }
2159
2160 /* if operating in HDLC LoopMode and the adapter */
2161 /* has yet to be inserted into the loop, we can't */
2162 /* transmit */
2163
2164 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2165 !usc_loopmode_active(info) )
2166 {
2167 ret = 0;
2168 goto cleanup;
2169 }
2170
2171 if ( info->xmit_cnt ) {
2172 			/* Send chars accumulated from put_char() calls */
2173 			/* as a frame and wait before accepting more data. */
2174 ret = 0;
2175
2176 /* copy data from circular xmit_buf to */
2177 /* transmit DMA buffer. */
2178 mgsl_load_tx_dma_buffer(info,
2179 info->xmit_buf,info->xmit_cnt);
2180 if ( debug_level >= DEBUG_LEVEL_INFO )
2181 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2182 __FILE__,__LINE__,info->device_name);
2183 } else {
2184 if ( debug_level >= DEBUG_LEVEL_INFO )
2185 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2186 __FILE__,__LINE__,info->device_name);
2187 ret = count;
2188 info->xmit_cnt = count;
2189 mgsl_load_tx_dma_buffer(info,buf,count);
2190 }
2191 } else {
2192 while (1) {
2193 spin_lock_irqsave(&info->irq_spinlock,flags);
2194 c = min_t(int, count,
2195 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2196 SERIAL_XMIT_SIZE - info->xmit_head));
2197 if (c <= 0) {
2198 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2199 break;
2200 }
2201 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2202 info->xmit_head = ((info->xmit_head + c) &
2203 (SERIAL_XMIT_SIZE-1));
2204 info->xmit_cnt += c;
2205 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2206 buf += c;
2207 count -= c;
2208 ret += c;
2209 }
2210 }
2211
2212 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2213 spin_lock_irqsave(&info->irq_spinlock,flags);
2214 if (!info->tx_active)
2215 usc_start_transmitter(info);
2216 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2217 }
2218 cleanup:
2219 if ( debug_level >= DEBUG_LEVEL_INFO )
2220 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2221 __FILE__,__LINE__,info->device_name,ret);
2222
2223 return ret;
2224
2225 } /* end of mgsl_write() */
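/*
 * A minimal user-space sketch of sending one frame while the port is in
 * HDLC or RAW mode, where a single write() carries one complete frame as
 * described above.  Not part of the driver; the frame contents are an
 * assumption for illustration.
 */
#if 0	/* illustrative user-space sketch only */
#include <string.h>
#include <unistd.h>

int example_send_frame(int fd)
{
	unsigned char frame[128];
	ssize_t n;

	memset(frame, 0x55, sizeof(frame));
	/* one write() == one frame; a return of 0 means the transmitter
	 * and holding buffers were busy and the frame was not accepted */
	n = write(fd, frame, sizeof(frame));
	return (n == (ssize_t)sizeof(frame)) ? 0 : -1;
}
#endif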
2226
2227 /* mgsl_write_room()
2228 *
2229 * Return the count of free bytes in transmit buffer
2230 *
2231 * Arguments: tty pointer to tty info structure
2232  * Return Value: count of free bytes available for transmit data
2233 */
2234 static int mgsl_write_room(struct tty_struct *tty)
2235 {
2236 struct mgsl_struct *info = tty->driver_data;
2237 int ret;
2238
2239 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2240 return 0;
2241 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2242 if (ret < 0)
2243 ret = 0;
2244
2245 if (debug_level >= DEBUG_LEVEL_INFO)
2246 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2247 __FILE__,__LINE__, info->device_name,ret );
2248
2249 if ( info->params.mode == MGSL_MODE_HDLC ||
2250 info->params.mode == MGSL_MODE_RAW ) {
2251 /* operating in synchronous (frame oriented) mode */
2252 if ( info->tx_active )
2253 return 0;
2254 else
2255 return HDLC_MAX_FRAME_SIZE;
2256 }
2257
2258 return ret;
2259
2260 } /* end of mgsl_write_room() */
2261
2262 /* mgsl_chars_in_buffer()
2263 *
2264 * Return the count of bytes in transmit buffer
2265 *
2266 * Arguments: tty pointer to tty info structure
2267  * Return Value: count of bytes in the transmit buffer
2268 */
2269 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2270 {
2271 struct mgsl_struct *info = tty->driver_data;
2272
2273 if (debug_level >= DEBUG_LEVEL_INFO)
2274 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2275 __FILE__,__LINE__, info->device_name );
2276
2277 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2278 return 0;
2279
2280 if (debug_level >= DEBUG_LEVEL_INFO)
2281 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2282 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2283
2284 if ( info->params.mode == MGSL_MODE_HDLC ||
2285 info->params.mode == MGSL_MODE_RAW ) {
2286 /* operating in synchronous (frame oriented) mode */
2287 if ( info->tx_active )
2288 return info->max_frame_size;
2289 else
2290 return 0;
2291 }
2292
2293 return info->xmit_cnt;
2294 } /* end of mgsl_chars_in_buffer() */
2295
2296 /* mgsl_flush_buffer()
2297 *
2298 * Discard all data in the send buffer
2299 *
2300 * Arguments: tty pointer to tty info structure
2301 * Return Value: None
2302 */
2303 static void mgsl_flush_buffer(struct tty_struct *tty)
2304 {
2305 struct mgsl_struct *info = tty->driver_data;
2306 unsigned long flags;
2307
2308 if (debug_level >= DEBUG_LEVEL_INFO)
2309 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2310 __FILE__,__LINE__, info->device_name );
2311
2312 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2313 return;
2314
2315 spin_lock_irqsave(&info->irq_spinlock,flags);
2316 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2317 del_timer(&info->tx_timer);
2318 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2319
2320 tty_wakeup(tty);
2321 }
2322
2323 /* mgsl_send_xchar()
2324 *
2325 * Send a high-priority XON/XOFF character
2326 *
2327 * Arguments: tty pointer to tty info structure
2328 * ch character to send
2329 * Return Value: None
2330 */
2331 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2332 {
2333 struct mgsl_struct *info = tty->driver_data;
2334 unsigned long flags;
2335
2336 if (debug_level >= DEBUG_LEVEL_INFO)
2337 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2338 __FILE__,__LINE__, info->device_name, ch );
2339
2340 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2341 return;
2342
2343 info->x_char = ch;
2344 if (ch) {
2345 /* Make sure transmit interrupts are on */
2346 spin_lock_irqsave(&info->irq_spinlock,flags);
2347 if (!info->tx_enabled)
2348 usc_start_transmitter(info);
2349 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2350 }
2351 } /* end of mgsl_send_xchar() */
2352
2353 /* mgsl_throttle()
2354 *
2355 * Signal remote device to throttle send data (our receive data)
2356 *
2357 * Arguments: tty pointer to tty info structure
2358 * Return Value: None
2359 */
2360 static void mgsl_throttle(struct tty_struct * tty)
2361 {
2362 struct mgsl_struct *info = tty->driver_data;
2363 unsigned long flags;
2364
2365 if (debug_level >= DEBUG_LEVEL_INFO)
2366 printk("%s(%d):mgsl_throttle(%s) entry\n",
2367 __FILE__,__LINE__, info->device_name );
2368
2369 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2370 return;
2371
2372 if (I_IXOFF(tty))
2373 mgsl_send_xchar(tty, STOP_CHAR(tty));
2374
2375 if (tty->termios->c_cflag & CRTSCTS) {
2376 spin_lock_irqsave(&info->irq_spinlock,flags);
2377 info->serial_signals &= ~SerialSignal_RTS;
2378 usc_set_serial_signals(info);
2379 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2380 }
2381 } /* end of mgsl_throttle() */
2382
2383 /* mgsl_unthrottle()
2384 *
2385 * Signal remote device to stop throttling send data (our receive data)
2386 *
2387 * Arguments: tty pointer to tty info structure
2388 * Return Value: None
2389 */
2390 static void mgsl_unthrottle(struct tty_struct * tty)
2391 {
2392 struct mgsl_struct *info = tty->driver_data;
2393 unsigned long flags;
2394
2395 if (debug_level >= DEBUG_LEVEL_INFO)
2396 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2397 __FILE__,__LINE__, info->device_name );
2398
2399 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2400 return;
2401
2402 if (I_IXOFF(tty)) {
2403 if (info->x_char)
2404 info->x_char = 0;
2405 else
2406 mgsl_send_xchar(tty, START_CHAR(tty));
2407 }
2408
2409 if (tty->termios->c_cflag & CRTSCTS) {
2410 spin_lock_irqsave(&info->irq_spinlock,flags);
2411 info->serial_signals |= SerialSignal_RTS;
2412 usc_set_serial_signals(info);
2413 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2414 }
2415
2416 } /* end of mgsl_unthrottle() */
2417
2418 /* mgsl_get_stats()
2419 *
2420  * 	get the current serial statistics (counters are cleared when no user buffer is supplied)
2421 *
2422 * Arguments: info pointer to device instance data
2423 * user_icount pointer to buffer to hold returned stats
2424 *
2425 * Return Value: 0 if success, otherwise error code
2426 */
2427 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2428 {
2429 int err;
2430
2431 if (debug_level >= DEBUG_LEVEL_INFO)
2432 		printk("%s(%d):mgsl_get_stats(%s)\n",
2433 __FILE__,__LINE__, info->device_name);
2434
2435 if (!user_icount) {
2436 memset(&info->icount, 0, sizeof(info->icount));
2437 } else {
2438 mutex_lock(&info->port.mutex);
2439 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2440 mutex_unlock(&info->port.mutex);
2441 if (err)
2442 return -EFAULT;
2443 }
2444
2445 return 0;
2446
2447 } /* end of mgsl_get_stats() */
2448
2449 /* mgsl_get_params()
2450 *
2451 * get the current serial parameters information
2452 *
2453 * Arguments: info pointer to device instance data
2454 * user_params pointer to buffer to hold returned params
2455 *
2456 * Return Value: 0 if success, otherwise error code
2457 */
2458 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2459 {
2460 int err;
2461 if (debug_level >= DEBUG_LEVEL_INFO)
2462 printk("%s(%d):mgsl_get_params(%s)\n",
2463 __FILE__,__LINE__, info->device_name);
2464
2465 mutex_lock(&info->port.mutex);
2466 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2467 mutex_unlock(&info->port.mutex);
2468 if (err) {
2469 if ( debug_level >= DEBUG_LEVEL_INFO )
2470 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2471 __FILE__,__LINE__,info->device_name);
2472 return -EFAULT;
2473 }
2474
2475 return 0;
2476
2477 } /* end of mgsl_get_params() */
2478
2479 /* mgsl_set_params()
2480 *
2481 * set the serial parameters
2482 *
2483 * Arguments:
2484 *
2485 * info pointer to device instance data
2486 * new_params user buffer containing new serial params
2487 *
2488 * Return Value: 0 if success, otherwise error code
2489 */
2490 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2491 {
2492 unsigned long flags;
2493 MGSL_PARAMS tmp_params;
2494 int err;
2495
2496 if (debug_level >= DEBUG_LEVEL_INFO)
2497 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2498 info->device_name );
2499 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2500 if (err) {
2501 if ( debug_level >= DEBUG_LEVEL_INFO )
2502 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2503 __FILE__,__LINE__,info->device_name);
2504 return -EFAULT;
2505 }
2506
2507 mutex_lock(&info->port.mutex);
2508 spin_lock_irqsave(&info->irq_spinlock,flags);
2509 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2510 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2511
2512 mgsl_change_params(info);
2513 mutex_unlock(&info->port.mutex);
2514
2515 return 0;
2516
2517 } /* end of mgsl_set_params() */
2518
2519 /* mgsl_get_txidle()
2520 *
2521 * get the current transmit idle mode
2522 *
2523 * Arguments: info pointer to device instance data
2524 * idle_mode pointer to buffer to hold returned idle mode
2525 *
2526 * Return Value: 0 if success, otherwise error code
2527 */
2528 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2529 {
2530 int err;
2531
2532 if (debug_level >= DEBUG_LEVEL_INFO)
2533 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2534 __FILE__,__LINE__, info->device_name, info->idle_mode);
2535
2536 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2537 if (err) {
2538 if ( debug_level >= DEBUG_LEVEL_INFO )
2539 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2540 __FILE__,__LINE__,info->device_name);
2541 return -EFAULT;
2542 }
2543
2544 return 0;
2545
2546 } /* end of mgsl_get_txidle() */
2547
2548 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2549 *
2550 * Arguments: info pointer to device instance data
2551 * idle_mode new idle mode
2552 *
2553 * Return Value: 0 if success, otherwise error code
2554 */
2555 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2556 {
2557 unsigned long flags;
2558
2559 if (debug_level >= DEBUG_LEVEL_INFO)
2560 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2561 info->device_name, idle_mode );
2562
2563 spin_lock_irqsave(&info->irq_spinlock,flags);
2564 info->idle_mode = idle_mode;
2565 usc_set_txidle( info );
2566 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2567 return 0;
2568
2569 } /* end of mgsl_set_txidle() */
2570
2571 /* mgsl_txenable()
2572 *
2573 * enable or disable the transmitter
2574 *
2575 * Arguments:
2576 *
2577 * info pointer to device instance data
2578 * enable 1 = enable, 0 = disable
2579 *
2580 * Return Value: 0 if success, otherwise error code
2581 */
2582 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2583 {
2584 unsigned long flags;
2585
2586 if (debug_level >= DEBUG_LEVEL_INFO)
2587 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2588 info->device_name, enable);
2589
2590 spin_lock_irqsave(&info->irq_spinlock,flags);
2591 if ( enable ) {
2592 if ( !info->tx_enabled ) {
2593
2594 usc_start_transmitter(info);
2595 /*--------------------------------------------------
2596 * if HDLC/SDLC Loop mode, attempt to insert the
2597 * station in the 'loop' by setting CMR:13. Upon
2598 * receipt of the next GoAhead (RxAbort) sequence,
2599 * the OnLoop indicator (CCSR:7) should go active
2600 * to indicate that we are on the loop
2601 *--------------------------------------------------*/
2602 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2603 usc_loopmode_insert_request( info );
2604 }
2605 } else {
2606 if ( info->tx_enabled )
2607 usc_stop_transmitter(info);
2608 }
2609 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2610 return 0;
2611
2612 } /* end of mgsl_txenable() */
2613
2614 /* mgsl_txabort() abort send HDLC frame
2615 *
2616 * Arguments: info pointer to device instance data
2617 * Return Value: 0 if success, otherwise error code
2618 */
2619 static int mgsl_txabort(struct mgsl_struct * info)
2620 {
2621 unsigned long flags;
2622
2623 if (debug_level >= DEBUG_LEVEL_INFO)
2624 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2625 info->device_name);
2626
2627 spin_lock_irqsave(&info->irq_spinlock,flags);
2628 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2629 {
2630 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2631 usc_loopmode_cancel_transmit( info );
2632 else
2633 usc_TCmd(info,TCmd_SendAbort);
2634 }
2635 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2636 return 0;
2637
2638 } /* end of mgsl_txabort() */
2639
2640 /* mgsl_rxenable() enable or disable the receiver
2641 *
2642 * Arguments: info pointer to device instance data
2643 * enable 1 = enable, 0 = disable
2644 * Return Value: 0 if success, otherwise error code
2645 */
2646 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2647 {
2648 unsigned long flags;
2649
2650 if (debug_level >= DEBUG_LEVEL_INFO)
2651 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2652 info->device_name, enable);
2653
2654 spin_lock_irqsave(&info->irq_spinlock,flags);
2655 if ( enable ) {
2656 if ( !info->rx_enabled )
2657 usc_start_receiver(info);
2658 } else {
2659 if ( info->rx_enabled )
2660 usc_stop_receiver(info);
2661 }
2662 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2663 return 0;
2664
2665 } /* end of mgsl_rxenable() */
2666
2667 /* mgsl_wait_event() wait for specified event to occur
2668 *
2669 * Arguments: info pointer to device instance data
2670 * mask pointer to bitmask of events to wait for
2671  * Return Value: 	0 if successful, with the bit mask updated to
2672  * 			the events that occurred,
2673 * otherwise error code
2674 */
2675 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2676 {
2677 unsigned long flags;
2678 int s;
2679 int rc=0;
2680 struct mgsl_icount cprev, cnow;
2681 int events;
2682 int mask;
2683 struct _input_signal_events oldsigs, newsigs;
2684 DECLARE_WAITQUEUE(wait, current);
2685
2686 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2687 if (rc) {
2688 return -EFAULT;
2689 }
2690
2691 if (debug_level >= DEBUG_LEVEL_INFO)
2692 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2693 info->device_name, mask);
2694
2695 spin_lock_irqsave(&info->irq_spinlock,flags);
2696
2697 /* return immediately if state matches requested events */
2698 usc_get_serial_signals(info);
2699 s = info->serial_signals;
2700 events = mask &
2701 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2702 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2703 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2704 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2705 if (events) {
2706 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2707 goto exit;
2708 }
2709
2710 /* save current irq counts */
2711 cprev = info->icount;
2712 oldsigs = info->input_signal_events;
2713
2714 /* enable hunt and idle irqs if needed */
2715 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2716 u16 oldreg = usc_InReg(info,RICR);
2717 u16 newreg = oldreg +
2718 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2719 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2720 if (oldreg != newreg)
2721 usc_OutReg(info, RICR, newreg);
2722 }
2723
2724 set_current_state(TASK_INTERRUPTIBLE);
2725 add_wait_queue(&info->event_wait_q, &wait);
2726
2727 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2728
2729
2730 for(;;) {
2731 schedule();
2732 if (signal_pending(current)) {
2733 rc = -ERESTARTSYS;
2734 break;
2735 }
2736
2737 /* get current irq counts */
2738 spin_lock_irqsave(&info->irq_spinlock,flags);
2739 cnow = info->icount;
2740 newsigs = info->input_signal_events;
2741 set_current_state(TASK_INTERRUPTIBLE);
2742 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2743
2744 /* if no change, wait aborted for some reason */
2745 if (newsigs.dsr_up == oldsigs.dsr_up &&
2746 newsigs.dsr_down == oldsigs.dsr_down &&
2747 newsigs.dcd_up == oldsigs.dcd_up &&
2748 newsigs.dcd_down == oldsigs.dcd_down &&
2749 newsigs.cts_up == oldsigs.cts_up &&
2750 newsigs.cts_down == oldsigs.cts_down &&
2751 newsigs.ri_up == oldsigs.ri_up &&
2752 newsigs.ri_down == oldsigs.ri_down &&
2753 cnow.exithunt == cprev.exithunt &&
2754 cnow.rxidle == cprev.rxidle) {
2755 rc = -EIO;
2756 break;
2757 }
2758
2759 events = mask &
2760 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2761 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2762 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2763 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2764 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2765 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2766 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2767 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2768 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2769 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2770 if (events)
2771 break;
2772
2773 cprev = cnow;
2774 oldsigs = newsigs;
2775 }
2776
2777 remove_wait_queue(&info->event_wait_q, &wait);
2778 set_current_state(TASK_RUNNING);
2779
2780 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2781 spin_lock_irqsave(&info->irq_spinlock,flags);
2782 if (!waitqueue_active(&info->event_wait_q)) {
2783 			/* disable exit hunt mode/idle rcvd IRQs */
2784 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2785 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2786 }
2787 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2788 }
2789 exit:
2790 if ( rc == 0 )
2791 PUT_USER(rc, events, mask_ptr);
2792
2793 return rc;
2794
2795 } /* end of mgsl_wait_event() */
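/*
 * A minimal user-space sketch of the MGSL_IOCWAITEVENT ioctl serviced
 * above: the argument is a pointer to an int event mask, which is
 * rewritten with the events that actually occurred.  Not part of the
 * driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <sys/ioctl.h>
#include <linux/synclink.h>

int example_wait_for_dcd(int fd)
{
	int mask = MgslEvent_DcdActive | MgslEvent_DcdInactive;

	if (ioctl(fd, MGSL_IOCWAITEVENT, &mask) < 0)
		return -1;		/* error or interrupted by a signal */
	return mask;			/* event(s) that triggered the wakeup */
}
#endif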
2796
2797 static int modem_input_wait(struct mgsl_struct *info,int arg)
2798 {
2799 unsigned long flags;
2800 int rc;
2801 struct mgsl_icount cprev, cnow;
2802 DECLARE_WAITQUEUE(wait, current);
2803
2804 /* save current irq counts */
2805 spin_lock_irqsave(&info->irq_spinlock,flags);
2806 cprev = info->icount;
2807 add_wait_queue(&info->status_event_wait_q, &wait);
2808 set_current_state(TASK_INTERRUPTIBLE);
2809 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2810
2811 for(;;) {
2812 schedule();
2813 if (signal_pending(current)) {
2814 rc = -ERESTARTSYS;
2815 break;
2816 }
2817
2818 /* get new irq counts */
2819 spin_lock_irqsave(&info->irq_spinlock,flags);
2820 cnow = info->icount;
2821 set_current_state(TASK_INTERRUPTIBLE);
2822 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2823
2824 /* if no change, wait aborted for some reason */
2825 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2826 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2827 rc = -EIO;
2828 break;
2829 }
2830
2831 /* check for change in caller specified modem input */
2832 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2833 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2834 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2835 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2836 rc = 0;
2837 break;
2838 }
2839
2840 cprev = cnow;
2841 }
2842 remove_wait_queue(&info->status_event_wait_q, &wait);
2843 set_current_state(TASK_RUNNING);
2844 return rc;
2845 }
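/*
 * modem_input_wait() above backs the standard TIOCMIWAIT ioctl; a minimal
 * user-space sketch, with the mask passed by value.  Not part of the
 * driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <sys/ioctl.h>

int example_wait_carrier_change(int fd)
{
	/* sleep until DCD (carrier) changes state */
	return ioctl(fd, TIOCMIWAIT, TIOCM_CD);
}
#endif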
2846
2847 /* return the state of the serial control and status signals
2848 */
2849 static int tiocmget(struct tty_struct *tty)
2850 {
2851 struct mgsl_struct *info = tty->driver_data;
2852 unsigned int result;
2853 unsigned long flags;
2854
2855 spin_lock_irqsave(&info->irq_spinlock,flags);
2856 usc_get_serial_signals(info);
2857 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2858
2859 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2860 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2861 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2862 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2863 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2864 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2865
2866 if (debug_level >= DEBUG_LEVEL_INFO)
2867 printk("%s(%d):%s tiocmget() value=%08X\n",
2868 __FILE__,__LINE__, info->device_name, result );
2869 return result;
2870 }
2871
2872 /* set modem control signals (DTR/RTS)
2873 */
2874 static int tiocmset(struct tty_struct *tty,
2875 unsigned int set, unsigned int clear)
2876 {
2877 struct mgsl_struct *info = tty->driver_data;
2878 unsigned long flags;
2879
2880 if (debug_level >= DEBUG_LEVEL_INFO)
2881 printk("%s(%d):%s tiocmset(%x,%x)\n",
2882 __FILE__,__LINE__,info->device_name, set, clear);
2883
2884 if (set & TIOCM_RTS)
2885 info->serial_signals |= SerialSignal_RTS;
2886 if (set & TIOCM_DTR)
2887 info->serial_signals |= SerialSignal_DTR;
2888 if (clear & TIOCM_RTS)
2889 info->serial_signals &= ~SerialSignal_RTS;
2890 if (clear & TIOCM_DTR)
2891 info->serial_signals &= ~SerialSignal_DTR;
2892
2893 spin_lock_irqsave(&info->irq_spinlock,flags);
2894 usc_set_serial_signals(info);
2895 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2896
2897 return 0;
2898 }
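/*
 * tiocmget()/tiocmset() above service the TIOCMGET/TIOCMSET family of
 * ioctls through the tty core; a minimal user-space sketch that reads the
 * signal states and then asserts DTR and RTS.  Not part of the driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <sys/ioctl.h>

int example_raise_dtr_rts(int fd)
{
	int sigs = 0;

	if (ioctl(fd, TIOCMGET, &sigs) < 0)	/* current signal states */
		return -1;
	sigs = TIOCM_DTR | TIOCM_RTS;
	return ioctl(fd, TIOCMBIS, &sigs);	/* assert DTR and RTS */
}
#endif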
2899
2900 /* mgsl_break() Set or clear transmit break condition
2901 *
2902 * Arguments: tty pointer to tty instance data
2903 * break_state -1=set break condition, 0=clear
2904 * Return Value: error code
2905 */
2906 static int mgsl_break(struct tty_struct *tty, int break_state)
2907 {
2908 struct mgsl_struct * info = tty->driver_data;
2909 unsigned long flags;
2910
2911 if (debug_level >= DEBUG_LEVEL_INFO)
2912 printk("%s(%d):mgsl_break(%s,%d)\n",
2913 __FILE__,__LINE__, info->device_name, break_state);
2914
2915 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2916 return -EINVAL;
2917
2918 spin_lock_irqsave(&info->irq_spinlock,flags);
2919 if (break_state == -1)
2920 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2921 else
2922 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2923 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2924 return 0;
2925
2926 } /* end of mgsl_break() */
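/*
 * mgsl_break() above is reached through the tty break_ctl hook; a minimal
 * user-space sketch using tcsendbreak(), which sets and then clears the
 * break condition around the break interval.  Not part of the driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <termios.h>

int example_send_break(int fd)
{
	return tcsendbreak(fd, 0);	/* default-length break */
}
#endif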
2927
2928 /*
2929 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2930  * Return: writes the counters to the user-supplied counter struct
2931 * NB: both 1->0 and 0->1 transitions are counted except for
2932 * RI where only 0->1 is counted.
2933 */
2934 static int msgl_get_icount(struct tty_struct *tty,
2935 struct serial_icounter_struct *icount)
2936
2937 {
2938 struct mgsl_struct * info = tty->driver_data;
2939 struct mgsl_icount cnow; /* kernel counter temps */
2940 unsigned long flags;
2941
2942 spin_lock_irqsave(&info->irq_spinlock,flags);
2943 cnow = info->icount;
2944 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2945
2946 icount->cts = cnow.cts;
2947 icount->dsr = cnow.dsr;
2948 icount->rng = cnow.rng;
2949 icount->dcd = cnow.dcd;
2950 icount->rx = cnow.rx;
2951 icount->tx = cnow.tx;
2952 icount->frame = cnow.frame;
2953 icount->overrun = cnow.overrun;
2954 icount->parity = cnow.parity;
2955 icount->brk = cnow.brk;
2956 icount->buf_overrun = cnow.buf_overrun;
2957 return 0;
2958 }
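/*
 * msgl_get_icount() above services TIOCGICOUNT; a minimal user-space
 * sketch that snapshots the DCD/RI/DSR/CTS transition counts and the
 * rx/tx/error totals.  Not part of the driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <sys/ioctl.h>
#include <linux/serial.h>

int example_read_icounters(int fd, struct serial_icounter_struct *ic)
{
	return ioctl(fd, TIOCGICOUNT, ic);
}
#endif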
2959
2960 /* mgsl_ioctl() Service an IOCTL request
2961 *
2962 * Arguments:
2963 *
2964 * tty pointer to tty instance data
2965 * cmd IOCTL command code
2966 * arg command argument/context
2967 *
2968 * Return Value: 0 if success, otherwise error code
2969 */
2970 static int mgsl_ioctl(struct tty_struct *tty,
2971 unsigned int cmd, unsigned long arg)
2972 {
2973 struct mgsl_struct * info = tty->driver_data;
2974
2975 if (debug_level >= DEBUG_LEVEL_INFO)
2976 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2977 info->device_name, cmd );
2978
2979 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2980 return -ENODEV;
2981
2982 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2983 (cmd != TIOCMIWAIT)) {
2984 if (tty->flags & (1 << TTY_IO_ERROR))
2985 return -EIO;
2986 }
2987
2988 return mgsl_ioctl_common(info, cmd, arg);
2989 }
2990
2991 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2992 {
2993 void __user *argp = (void __user *)arg;
2994
2995 switch (cmd) {
2996 case MGSL_IOCGPARAMS:
2997 return mgsl_get_params(info, argp);
2998 case MGSL_IOCSPARAMS:
2999 return mgsl_set_params(info, argp);
3000 case MGSL_IOCGTXIDLE:
3001 return mgsl_get_txidle(info, argp);
3002 case MGSL_IOCSTXIDLE:
3003 return mgsl_set_txidle(info,(int)arg);
3004 case MGSL_IOCTXENABLE:
3005 return mgsl_txenable(info,(int)arg);
3006 case MGSL_IOCRXENABLE:
3007 return mgsl_rxenable(info,(int)arg);
3008 case MGSL_IOCTXABORT:
3009 return mgsl_txabort(info);
3010 case MGSL_IOCGSTATS:
3011 return mgsl_get_stats(info, argp);
3012 case MGSL_IOCWAITEVENT:
3013 return mgsl_wait_event(info, argp);
3014 case MGSL_IOCLOOPTXDONE:
3015 return mgsl_loopmode_send_done(info);
3016 /* Wait for modem input (DCD,RI,DSR,CTS) change
3017 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3018 */
3019 case TIOCMIWAIT:
3020 return modem_input_wait(info,(int)arg);
3021
3022 default:
3023 return -ENOIOCTLCMD;
3024 }
3025 return 0;
3026 }
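/*
 * A minimal user-space sketch of driving the MGSL_IOC* ioctls dispatched
 * above: read the current MGSL_PARAMS, switch to HDLC mode, and enable
 * the receiver.  Not part of the driver; the 9600 bps rate is an
 * arbitrary example value.
 */
#if 0	/* illustrative user-space sketch only */
#include <sys/ioctl.h>
#include <linux/synclink.h>

int example_configure_hdlc(int fd)
{
	MGSL_PARAMS params;

	if (ioctl(fd, MGSL_IOCGPARAMS, &params) < 0)	/* current params */
		return -1;
	params.mode = MGSL_MODE_HDLC;
	params.data_rate = 9600;
	if (ioctl(fd, MGSL_IOCSPARAMS, &params) < 0)	/* apply new params */
		return -1;
	return ioctl(fd, MGSL_IOCRXENABLE, 1);		/* start the receiver */
}
#endif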
3027
3028 /* mgsl_set_termios()
3029 *
3030 * Set new termios settings
3031 *
3032 * Arguments:
3033 *
3034 * tty pointer to tty structure
3035  * 	old_termios	pointer to the previous termios settings
3036 *
3037 * Return Value: None
3038 */
3039 static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3040 {
3041 struct mgsl_struct *info = tty->driver_data;
3042 unsigned long flags;
3043
3044 if (debug_level >= DEBUG_LEVEL_INFO)
3045 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3046 tty->driver->name );
3047
3048 mgsl_change_params(info);
3049
3050 /* Handle transition to B0 status */
3051 if (old_termios->c_cflag & CBAUD &&
3052 !(tty->termios->c_cflag & CBAUD)) {
3053 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3054 spin_lock_irqsave(&info->irq_spinlock,flags);
3055 usc_set_serial_signals(info);
3056 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3057 }
3058
3059 /* Handle transition away from B0 status */
3060 if (!(old_termios->c_cflag & CBAUD) &&
3061 tty->termios->c_cflag & CBAUD) {
3062 info->serial_signals |= SerialSignal_DTR;
3063 if (!(tty->termios->c_cflag & CRTSCTS) ||
3064 !test_bit(TTY_THROTTLED, &tty->flags)) {
3065 info->serial_signals |= SerialSignal_RTS;
3066 }
3067 spin_lock_irqsave(&info->irq_spinlock,flags);
3068 usc_set_serial_signals(info);
3069 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3070 }
3071
3072 /* Handle turning off CRTSCTS */
3073 if (old_termios->c_cflag & CRTSCTS &&
3074 !(tty->termios->c_cflag & CRTSCTS)) {
3075 tty->hw_stopped = 0;
3076 mgsl_start(tty);
3077 }
3078
3079 } /* end of mgsl_set_termios() */
3080
3081 /* mgsl_close()
3082 *
3083 * Called when port is closed. Wait for remaining data to be
3084 * sent. Disable port and free resources.
3085 *
3086 * Arguments:
3087 *
3088 * tty pointer to open tty structure
3089 * filp pointer to open file object
3090 *
3091 * Return Value: None
3092 */
3093 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3094 {
3095 struct mgsl_struct * info = tty->driver_data;
3096
3097 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3098 return;
3099
3100 if (debug_level >= DEBUG_LEVEL_INFO)
3101 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3102 __FILE__,__LINE__, info->device_name, info->port.count);
3103
3104 if (tty_port_close_start(&info->port, tty, filp) == 0)
3105 goto cleanup;
3106
3107 mutex_lock(&info->port.mutex);
3108 if (info->port.flags & ASYNC_INITIALIZED)
3109 mgsl_wait_until_sent(tty, info->timeout);
3110 mgsl_flush_buffer(tty);
3111 tty_ldisc_flush(tty);
3112 shutdown(info);
3113 mutex_unlock(&info->port.mutex);
3114
3115 tty_port_close_end(&info->port, tty);
3116 info->port.tty = NULL;
3117 cleanup:
3118 if (debug_level >= DEBUG_LEVEL_INFO)
3119 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3120 tty->driver->name, info->port.count);
3121
3122 } /* end of mgsl_close() */
3123
3124 /* mgsl_wait_until_sent()
3125 *
3126 * Wait until the transmitter is empty.
3127 *
3128 * Arguments:
3129 *
3130 * tty pointer to tty info structure
3131 * timeout time to wait for send completion
3132 *
3133 * Return Value: None
3134 */
3135 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3136 {
3137 struct mgsl_struct * info = tty->driver_data;
3138 unsigned long orig_jiffies, char_time;
3139
3140 if (!info )
3141 return;
3142
3143 if (debug_level >= DEBUG_LEVEL_INFO)
3144 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3145 __FILE__,__LINE__, info->device_name );
3146
3147 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3148 return;
3149
3150 if (!(info->port.flags & ASYNC_INITIALIZED))
3151 goto exit;
3152
3153 orig_jiffies = jiffies;
3154
3155 /* Set check interval to 1/5 of estimated time to
3156 * send a character, and make it at least 1. The check
3157 * interval should also be less than the timeout.
3158 * Note: use tight timings here to satisfy the NIST-PCTS.
3159 */
3160
3161 if ( info->params.data_rate ) {
3162 char_time = info->timeout/(32 * 5);
3163 if (!char_time)
3164 char_time++;
3165 } else
3166 char_time = 1;
3167
3168 if (timeout)
3169 char_time = min_t(unsigned long, char_time, timeout);
3170
3171 if ( info->params.mode == MGSL_MODE_HDLC ||
3172 info->params.mode == MGSL_MODE_RAW ) {
3173 while (info->tx_active) {
3174 msleep_interruptible(jiffies_to_msecs(char_time));
3175 if (signal_pending(current))
3176 break;
3177 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3178 break;
3179 }
3180 } else {
3181 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3182 info->tx_enabled) {
3183 msleep_interruptible(jiffies_to_msecs(char_time));
3184 if (signal_pending(current))
3185 break;
3186 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3187 break;
3188 }
3189 }
3190
3191 exit:
3192 if (debug_level >= DEBUG_LEVEL_INFO)
3193 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3194 __FILE__,__LINE__, info->device_name );
3195
3196 } /* end of mgsl_wait_until_sent() */
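/*
 * mgsl_wait_until_sent() above is the routine tcdrain() ends up waiting
 * in; a minimal user-space sketch.  Not part of the driver.
 */
#if 0	/* illustrative user-space sketch only */
#include <termios.h>

int example_drain(int fd)
{
	/* block until the transmitter is reported empty (bounded by the
	 * port timeout computed in mgsl_change_params) */
	return tcdrain(fd);
}
#endif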
3197
3198 /* mgsl_hangup()
3199 *
3200 * Called by tty_hangup() when a hangup is signaled.
3201  * 	This is the same as closing all open files for the port.
3202 *
3203 * Arguments: tty pointer to associated tty object
3204 * Return Value: None
3205 */
3206 static void mgsl_hangup(struct tty_struct *tty)
3207 {
3208 struct mgsl_struct * info = tty->driver_data;
3209
3210 if (debug_level >= DEBUG_LEVEL_INFO)
3211 printk("%s(%d):mgsl_hangup(%s)\n",
3212 __FILE__,__LINE__, info->device_name );
3213
3214 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3215 return;
3216
3217 mgsl_flush_buffer(tty);
3218 shutdown(info);
3219
3220 info->port.count = 0;
3221 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
3222 info->port.tty = NULL;
3223
3224 wake_up_interruptible(&info->port.open_wait);
3225
3226 } /* end of mgsl_hangup() */
3227
3228 /*
3229 * carrier_raised()
3230 *
3231 * Return true if carrier is raised
3232 */
3233
3234 static int carrier_raised(struct tty_port *port)
3235 {
3236 unsigned long flags;
3237 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3238
3239 spin_lock_irqsave(&info->irq_spinlock, flags);
3240 usc_get_serial_signals(info);
3241 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3242 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3243 }
3244
3245 static void dtr_rts(struct tty_port *port, int on)
3246 {
3247 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3248 unsigned long flags;
3249
3250 spin_lock_irqsave(&info->irq_spinlock,flags);
3251 if (on)
3252 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3253 else
3254 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3255 usc_set_serial_signals(info);
3256 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3257 }
3258
3259
3260 /* block_til_ready()
3261 *
3262 * Block the current process until the specified port
3263 * is ready to be opened.
3264 *
3265 * Arguments:
3266 *
3267 * tty pointer to tty info structure
3268 * filp pointer to open file object
3269 * info pointer to device instance data
3270 *
3271 * Return Value: 0 if success, otherwise error code
3272 */
3273 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3274 struct mgsl_struct *info)
3275 {
3276 DECLARE_WAITQUEUE(wait, current);
3277 int retval;
3278 bool do_clocal = false;
3279 bool extra_count = false;
3280 unsigned long flags;
3281 int dcd;
3282 struct tty_port *port = &info->port;
3283
3284 if (debug_level >= DEBUG_LEVEL_INFO)
3285 printk("%s(%d):block_til_ready on %s\n",
3286 __FILE__,__LINE__, tty->driver->name );
3287
3288 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3289 /* nonblock mode is set or port is not enabled */
3290 port->flags |= ASYNC_NORMAL_ACTIVE;
3291 return 0;
3292 }
3293
3294 if (tty->termios->c_cflag & CLOCAL)
3295 do_clocal = true;
3296
3297 /* Wait for carrier detect and the line to become
3298 * free (i.e., not in use by the callout). While we are in
3299 * this loop, port->count is dropped by one, so that
3300 * mgsl_close() knows when to free things. We restore it upon
3301 * exit, either normal or abnormal.
3302 */
3303
3304 retval = 0;
3305 add_wait_queue(&port->open_wait, &wait);
3306
3307 if (debug_level >= DEBUG_LEVEL_INFO)
3308 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3309 __FILE__,__LINE__, tty->driver->name, port->count );
3310
3311 spin_lock_irqsave(&info->irq_spinlock, flags);
3312 if (!tty_hung_up_p(filp)) {
3313 extra_count = true;
3314 port->count--;
3315 }
3316 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3317 port->blocked_open++;
3318
3319 while (1) {
3320 if (tty->termios->c_cflag & CBAUD)
3321 tty_port_raise_dtr_rts(port);
3322
3323 set_current_state(TASK_INTERRUPTIBLE);
3324
3325 if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3326 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3327 -EAGAIN : -ERESTARTSYS;
3328 break;
3329 }
3330
3331 dcd = tty_port_carrier_raised(&info->port);
3332
3333 if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
3334 break;
3335
3336 if (signal_pending(current)) {
3337 retval = -ERESTARTSYS;
3338 break;
3339 }
3340
3341 if (debug_level >= DEBUG_LEVEL_INFO)
3342 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3343 __FILE__,__LINE__, tty->driver->name, port->count );
3344
3345 tty_unlock();
3346 schedule();
3347 tty_lock();
3348 }
3349
3350 set_current_state(TASK_RUNNING);
3351 remove_wait_queue(&port->open_wait, &wait);
3352
3353 /* FIXME: Racy on hangup during close wait */
3354 if (extra_count)
3355 port->count++;
3356 port->blocked_open--;
3357
3358 if (debug_level >= DEBUG_LEVEL_INFO)
3359 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3360 __FILE__,__LINE__, tty->driver->name, port->count );
3361
3362 if (!retval)
3363 port->flags |= ASYNC_NORMAL_ACTIVE;
3364
3365 return retval;
3366
3367 } /* end of block_til_ready() */
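/*
 * The blocking behaviour above is selected by O_NONBLOCK and CLOCAL; a
 * minimal user-space sketch contrasting a blocking open (waits for
 * carrier) with a non-blocking one.  Not part of the driver; /dev/ttySL0
 * is an assumed node name.
 */
#if 0	/* illustrative user-space sketch only */
#include <fcntl.h>

int example_open_port(int wait_for_carrier)
{
	int flags = O_RDWR | O_NOCTTY;

	if (!wait_for_carrier)
		flags |= O_NONBLOCK;	/* skip the carrier wait above */
	return open("/dev/ttySL0", flags);
}
#endif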
3368
3369 /* mgsl_open()
3370 *
3371 * Called when a port is opened. Init and enable port.
3372 * Perform serial-specific initialization for the tty structure.
3373 *
3374 * Arguments: tty pointer to tty info structure
3375 * filp associated file pointer
3376 *
3377 * Return Value: 0 if success, otherwise error code
3378 */
3379 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3380 {
3381 struct mgsl_struct *info;
3382 int retval, line;
3383 unsigned long flags;
3384
3385 /* verify range of specified line number */
3386 line = tty->index;
3387 if ((line < 0) || (line >= mgsl_device_count)) {
3388 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3389 __FILE__,__LINE__,line);
3390 return -ENODEV;
3391 }
3392
3393 /* find the info structure for the specified line */
3394 info = mgsl_device_list;
3395 while(info && info->line != line)
3396 info = info->next_device;
3397 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3398 return -ENODEV;
3399
3400 tty->driver_data = info;
3401 info->port.tty = tty;
3402
3403 if (debug_level >= DEBUG_LEVEL_INFO)
3404 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3405 __FILE__,__LINE__,tty->driver->name, info->port.count);
3406
3407 /* If port is closing, signal caller to try again */
3408 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
3409 if (info->port.flags & ASYNC_CLOSING)
3410 interruptible_sleep_on(&info->port.close_wait);
3411 retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
3412 -EAGAIN : -ERESTARTSYS);
3413 goto cleanup;
3414 }
3415
3416 info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3417
3418 spin_lock_irqsave(&info->netlock, flags);
3419 if (info->netcount) {
3420 retval = -EBUSY;
3421 spin_unlock_irqrestore(&info->netlock, flags);
3422 goto cleanup;
3423 }
3424 info->port.count++;
3425 spin_unlock_irqrestore(&info->netlock, flags);
3426
3427 if (info->port.count == 1) {
3428 /* 1st open on this device, init hardware */
3429 retval = startup(info);
3430 if (retval < 0)
3431 goto cleanup;
3432 }
3433
3434 retval = block_til_ready(tty, filp, info);
3435 if (retval) {
3436 if (debug_level >= DEBUG_LEVEL_INFO)
3437 printk("%s(%d):block_til_ready(%s) returned %d\n",
3438 __FILE__,__LINE__, info->device_name, retval);
3439 goto cleanup;
3440 }
3441
3442 if (debug_level >= DEBUG_LEVEL_INFO)
3443 printk("%s(%d):mgsl_open(%s) success\n",
3444 __FILE__,__LINE__, info->device_name);
3445 retval = 0;
3446
3447 cleanup:
3448 if (retval) {
3449 if (tty->count == 1)
3450 info->port.tty = NULL; /* tty layer will release tty struct */
3451 if(info->port.count)
3452 info->port.count--;
3453 }
3454
3455 return retval;
3456
3457 } /* end of mgsl_open() */
3458
3459 /*
3460 * /proc fs routines....
3461 */
3462
3463 static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3464 {
3465 char stat_buf[30];
3466 unsigned long flags;
3467
3468 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3469 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3470 info->device_name, info->io_base, info->irq_level,
3471 info->phys_memory_base, info->phys_lcr_base);
3472 } else {
3473 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3474 info->device_name, info->io_base,
3475 info->irq_level, info->dma_level);
3476 }
3477
3478 /* output current serial signal states */
3479 spin_lock_irqsave(&info->irq_spinlock,flags);
3480 usc_get_serial_signals(info);
3481 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3482
3483 stat_buf[0] = 0;
3484 stat_buf[1] = 0;
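	/* Each asserted signal is appended as "|NAME"; the extra leading '|'
	 * of the first entry is skipped later by printing from stat_buf+1. */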
3485 if (info->serial_signals & SerialSignal_RTS)
3486 strcat(stat_buf, "|RTS");
3487 if (info->serial_signals & SerialSignal_CTS)
3488 strcat(stat_buf, "|CTS");
3489 if (info->serial_signals & SerialSignal_DTR)
3490 strcat(stat_buf, "|DTR");
3491 if (info->serial_signals & SerialSignal_DSR)
3492 strcat(stat_buf, "|DSR");
3493 if (info->serial_signals & SerialSignal_DCD)
3494 strcat(stat_buf, "|CD");
3495 if (info->serial_signals & SerialSignal_RI)
3496 strcat(stat_buf, "|RI");
3497
3498 if (info->params.mode == MGSL_MODE_HDLC ||
3499 info->params.mode == MGSL_MODE_RAW ) {
3500 seq_printf(m, " HDLC txok:%d rxok:%d",
3501 info->icount.txok, info->icount.rxok);
3502 if (info->icount.txunder)
3503 seq_printf(m, " txunder:%d", info->icount.txunder);
3504 if (info->icount.txabort)
3505 seq_printf(m, " txabort:%d", info->icount.txabort);
3506 if (info->icount.rxshort)
3507 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3508 if (info->icount.rxlong)
3509 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3510 if (info->icount.rxover)
3511 seq_printf(m, " rxover:%d", info->icount.rxover);
3512 if (info->icount.rxcrc)
3513 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3514 } else {
3515 seq_printf(m, " ASYNC tx:%d rx:%d",
3516 info->icount.tx, info->icount.rx);
3517 if (info->icount.frame)
3518 seq_printf(m, " fe:%d", info->icount.frame);
3519 if (info->icount.parity)
3520 seq_printf(m, " pe:%d", info->icount.parity);
3521 if (info->icount.brk)
3522 seq_printf(m, " brk:%d", info->icount.brk);
3523 if (info->icount.overrun)
3524 seq_printf(m, " oe:%d", info->icount.overrun);
3525 }
3526
3527 /* Append serial signal status to end */
3528 seq_printf(m, " %s\n", stat_buf+1);
3529
3530 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3531 info->tx_active,info->bh_requested,info->bh_running,
3532 info->pending_bh);
3533
3534 spin_lock_irqsave(&info->irq_spinlock,flags);
3535 {
3536 u16 Tcsr = usc_InReg( info, TCSR );
3537 u16 Tdmr = usc_InDmaReg( info, TDMR );
3538 u16 Ticr = usc_InReg( info, TICR );
3539 u16 Rscr = usc_InReg( info, RCSR );
3540 u16 Rdmr = usc_InDmaReg( info, RDMR );
3541 u16 Ricr = usc_InReg( info, RICR );
3542 u16 Icr = usc_InReg( info, ICR );
3543 u16 Dccr = usc_InReg( info, DCCR );
3544 u16 Tmr = usc_InReg( info, TMR );
3545 u16 Tccr = usc_InReg( info, TCCR );
3546 u16 Ccar = inw( info->io_base + CCAR );
3547 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3548 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3549 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3550 }
3551 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3552 }
3553
3554 /* Called to print information about devices */
3555 static int mgsl_proc_show(struct seq_file *m, void *v)
3556 {
3557 struct mgsl_struct *info;
3558
3559 seq_printf(m, "synclink driver:%s\n", driver_version);
3560
3561 info = mgsl_device_list;
3562 while( info ) {
3563 line_info(m, info);
3564 info = info->next_device;
3565 }
3566 return 0;
3567 }
3568
3569 static int mgsl_proc_open(struct inode *inode, struct file *file)
3570 {
3571 return single_open(file, mgsl_proc_show, NULL);
3572 }
3573
3574 static const struct file_operations mgsl_proc_fops = {
3575 .owner = THIS_MODULE,
3576 .open = mgsl_proc_open,
3577 .read = seq_read,
3578 .llseek = seq_lseek,
3579 .release = single_release,
3580 };
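/*
 * These fops back the driver's /proc entry, which is registered through the
 * tty layer (.proc_fops in mgsl_ops below) and so normally shows up as
 * /proc/tty/driver/synclink.  Illustrative output only, one line_info()
 * block per adapter (addresses and counters depend on the hardware):
 *
 *   synclink driver:<version>
 *   ttySL0:PCI io:E800 irq:11 mem:C8000000 lcr:C8001000 HDLC txok:0 rxok:0 RTS|DTR
 *   txactive=0 bh_req=0 bh_run=0 pending_bh=0
 *   ...register dump...
 */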
3581
3582 /* mgsl_allocate_dma_buffers()
3583 *
3584 * Allocate and format DMA buffers (ISA adapter)
3585 * or format shared memory buffers (PCI adapter).
3586 *
3587 * Arguments: info pointer to device instance data
3588 * Return Value: 0 if success, otherwise error
3589 */
3590 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3591 {
3592 unsigned short BuffersPerFrame;
3593
3594 info->last_mem_alloc = 0;
3595
3596 /* Calculate the number of DMA buffers necessary to hold the */
3597 /* largest allowable frame size. Note: If the max frame size is */
3598 /* not an even multiple of the DMA buffer size then we need to */
3599 /* round the buffer count per frame up one. */
3600
3601 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3602 if ( info->max_frame_size % DMABUFFERSIZE )
3603 BuffersPerFrame++;
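	/* Worked example (illustrative, assuming a 4K DMABUFFERSIZE): a
	 * max_frame_size of 4096 needs exactly one buffer per frame, while
	 * anything from 4097 to 8192 rounds up to two buffers per frame. */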
3604
3605 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3606 /*
3607 * The PCI adapter has 256KBytes of shared memory to use.
3608 * This is 64 PAGE_SIZE buffers.
3609 *
3610 * The first page is used for padding at this time so the
3611 * buffer list does not begin at offset 0 of the PCI
3612 * adapter's shared memory.
3613 *
3614 * The 2nd page is used for the buffer list. A 4K buffer
3615 * list can hold 128 DMA_BUFFER structures at 32 bytes
3616 * each.
3617 *
3618 * This leaves 62 4K pages.
3619 *
3620 * The next N pages are used for transmit frame(s). We
3621 * reserve enough 4K page blocks to hold the required
3622 * number of transmit dma buffers (num_tx_dma_buffers),
3623 * each of MaxFrameSize size.
3624 *
3625 * Of the remaining pages (62-N), determine how many can
3626 * be used to receive full MaxFrameSize inbound frames
3627 */
3628 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3629 info->rx_buffer_count = 62 - info->tx_buffer_count;
3630 } else {
3631 /* Calculate the number of PAGE_SIZE buffers needed for */
3632 /* receive and transmit DMA buffers. */
3633
3634
3635 /* Calculate the number of DMA buffers necessary to */
3636 /* hold 7 max size receive frames and one max size transmit frame. */
3637 /* The receive buffer count is bumped by one so we avoid an */
3638 /* End of List condition if all receive buffers are used when */
3639 /* using linked list DMA buffers. */
3640
3641 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3642 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3643
3644 /*
3645 * limit total TxBuffers & RxBuffers to 62 4K total
3646 * (ala PCI Allocation)
3647 */
3648
3649 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3650 info->rx_buffer_count = 62 - info->tx_buffer_count;
3651
3652 }
3653
3654 if ( debug_level >= DEBUG_LEVEL_INFO )
3655 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3656 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3657
3658 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3659 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3660 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3661 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3662 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3663 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3664 return -ENOMEM;
3665 }
3666
3667 mgsl_reset_rx_dma_buffers( info );
3668 mgsl_reset_tx_dma_buffers( info );
3669
3670 return 0;
3671
3672 } /* end of mgsl_allocate_dma_buffers() */
3673
3674 /*
3675 * mgsl_alloc_buffer_list_memory()
3676 *
3677 * Allocate a common DMA buffer for use as the
3678 * receive and transmit buffer lists.
3679 *
3680 * A buffer list is a set of buffer entries where each entry contains
3681 * a pointer to an actual buffer and a pointer to the next buffer entry
3682 * (plus some other info about the buffer).
3683 *
3684 * The buffer entries for a list are built to form a circular list so
3685 * that when the entire list has been traversed you start back at the
3686 * beginning.
3687 *
3688 * This function allocates memory for just the buffer entries.
3689 * The links (pointer to next entry) are filled in with the physical
3690 * address of the next entry so the adapter can navigate the list
3691 * using bus master DMA. The pointers to the actual buffers are filled
3692 * out later when the actual buffers are allocated.
3693 *
3694 * Arguments: info pointer to device instance data
3695 * Return Value: 0 if success, otherwise error
3696 */
3697 static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3698 {
3699 unsigned int i;
3700
3701 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3702 /* PCI adapter uses shared memory. */
3703 info->buffer_list = info->memory_base + info->last_mem_alloc;
3704 info->buffer_list_phys = info->last_mem_alloc;
3705 info->last_mem_alloc += BUFFERLISTSIZE;
3706 } else {
3707 /* ISA adapter uses system memory. */
3708 /* The buffer lists are allocated as a common buffer that both */
3709 /* the processor and adapter can access. This allows the driver to */
3710 /* inspect portions of the buffer while other portions are being */
3711 /* updated by the adapter using Bus Master DMA. */
3712
3713 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3714 if (info->buffer_list == NULL)
3715 return -ENOMEM;
3716 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3717 }
3718
3719 /* We got the memory for the buffer entry lists. */
3720 /* Initialize the memory block to all zeros. */
3721 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3722
3723 /* Save virtual address pointers to the receive and */
3724 /* transmit buffer lists. (Receive 1st). These pointers will */
3725 /* be used by the processor to access the lists. */
3726 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3727 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3728 info->tx_buffer_list += info->rx_buffer_count;
3729
3730 /*
3731 * Build the links for the buffer entry lists such that
3732 * two circular lists are built. (Transmit and Receive).
3733 *
3734 * Note: the links are physical addresses
3735 * which are read by the adapter to determine the next
3736 * buffer entry to use.
3737 */
3738
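	/*
	 * Resulting layout (as built by the two loops below): entries 0 to
	 * rx_buffer_count-1 form the receive list, followed by tx_buffer_count
	 * transmit entries.  Within each list, entry i links to the physical
	 * address of entry i+1 and the last entry links back to the first,
	 * giving two independent circular lists.
	 */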
3739 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3740 /* calculate and store physical address of this buffer entry */
3741 info->rx_buffer_list[i].phys_entry =
3742 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3743
3744 /* calculate and store physical address of */
3745 /* next entry in circular list of entries */
3746
3747 info->rx_buffer_list[i].link = info->buffer_list_phys;
3748
3749 if ( i < info->rx_buffer_count - 1 )
3750 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3751 }
3752
3753 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3754 /* calculate and store physical address of this buffer entry */
3755 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3756 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3757
3758 /* calculate and store physical address of */
3759 /* next entry in circular list of entries */
3760
3761 info->tx_buffer_list[i].link = info->buffer_list_phys +
3762 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3763
3764 if ( i < info->tx_buffer_count - 1 )
3765 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3766 }
3767
3768 return 0;
3769
3770 } /* end of mgsl_alloc_buffer_list_memory() */
3771
3772 /* Free DMA buffers allocated for use as the
3773 * receive and transmit buffer lists.
3774 * Warning:
3775 *
3776 * The data transfer buffers associated with the buffer list
3777 * MUST be freed before freeing the buffer list itself because
3778 * the buffer list contains the information necessary to free
3779 * the individual buffers!
3780 */
3781 static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3782 {
3783 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3784 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3785
3786 info->buffer_list = NULL;
3787 info->rx_buffer_list = NULL;
3788 info->tx_buffer_list = NULL;
3789
3790 } /* end of mgsl_free_buffer_list_memory() */
3791
3792 /*
3793 * mgsl_alloc_frame_memory()
3794 *
3795 * Allocate the frame DMA buffers used by the specified buffer list.
3796 * Each DMA buffer will be one memory page in size. This is necessary
3797 * because memory can fragment enough that it may be impossible
3798 * to allocate contiguous pages.
3799 *
3800 * Arguments:
3801 *
3802 * info pointer to device instance data
3803 * BufferList pointer to list of buffer entries
3804 * Buffercount count of buffer entries in buffer list
3805 *
3806 * Return Value: 0 if success, otherwise -ENOMEM
3807 */
3808 static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3809 {
3810 int i;
3811 u32 phys_addr;
3812
3813 /* Allocate page sized buffers for the receive buffer list */
3814
3815 for ( i = 0; i < Buffercount; i++ ) {
3816 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3817 /* PCI adapter uses shared memory buffers. */
3818 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3819 phys_addr = info->last_mem_alloc;
3820 info->last_mem_alloc += DMABUFFERSIZE;
3821 } else {
3822 /* ISA adapter uses system memory. */
3823 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3824 if (BufferList[i].virt_addr == NULL)
3825 return -ENOMEM;
3826 phys_addr = (u32)(BufferList[i].dma_addr);
3827 }
3828 BufferList[i].phys_addr = phys_addr;
3829 }
3830
3831 return 0;
3832
3833 } /* end of mgsl_alloc_frame_memory() */
3834
3835 /*
3836 * mgsl_free_frame_memory()
3837 *
3838 * Free the buffers associated with
3839 * each buffer entry of a buffer list.
3840 *
3841 * Arguments:
3842 *
3843 * info pointer to device instance data
3844 * BufferList pointer to list of buffer entries
3845 * Buffercount count of buffer entries in buffer list
3846 *
3847 * Return Value: None
3848 */
3849 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3850 {
3851 int i;
3852
3853 if ( BufferList ) {
3854 for ( i = 0 ; i < Buffercount ; i++ ) {
3855 if ( BufferList[i].virt_addr ) {
3856 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3857 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3858 BufferList[i].virt_addr = NULL;
3859 }
3860 }
3861 }
3862
3863 } /* end of mgsl_free_frame_memory() */
3864
3865 /* mgsl_free_dma_buffers()
3866 *
3867 * Free DMA buffers
3868 *
3869 * Arguments: info pointer to device instance data
3870 * Return Value: None
3871 */
3872 static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3873 {
3874 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3875 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3876 mgsl_free_buffer_list_memory( info );
3877
3878 } /* end of mgsl_free_dma_buffers() */
3879
3880
3881 /*
3882 * mgsl_alloc_intermediate_rxbuffer_memory()
3883 *
3884 * Allocate a buffer large enough to hold max_frame_size. This buffer
3885 * is used to pass an assembled frame to the line discipline.
3886 *
3887 * Arguments:
3888 *
3889 * info pointer to device instance data
3890 *
3891 * Return Value: 0 if success, otherwise -ENOMEM
3892 */
3893 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3894 {
3895 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3896 if ( info->intermediate_rxbuffer == NULL )
3897 return -ENOMEM;
3898
3899 return 0;
3900
3901 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3902
3903 /*
3904 * mgsl_free_intermediate_rxbuffer_memory()
3905 *
3906 *
3907 * Arguments:
3908 *
3909 * info pointer to device instance data
3910 *
3911 * Return Value: None
3912 */
3913 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3914 {
3915 kfree(info->intermediate_rxbuffer);
3916 info->intermediate_rxbuffer = NULL;
3917
3918 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3919
3920 /*
3921 * mgsl_alloc_intermediate_txbuffer_memory()
3922 *
3923 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
3924 * This buffer is used to load transmit frames into the adapter's dma transfer
3925 * buffers when there is sufficient space.
3926 *
3927 * Arguments:
3928 *
3929 * info pointer to device instance data
3930 *
3931 * Return Value: 0 if success, otherwise -ENOMEM
3932 */
3933 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3934 {
3935 int i;
3936
3937 if ( debug_level >= DEBUG_LEVEL_INFO )
3938 printk("%s %s(%d) allocating %d tx holding buffers\n",
3939 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3940
3941 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3942
3943 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3944 info->tx_holding_buffers[i].buffer =
3945 kmalloc(info->max_frame_size, GFP_KERNEL);
3946 if (info->tx_holding_buffers[i].buffer == NULL) {
3947 for (--i; i >= 0; i--) {
3948 kfree(info->tx_holding_buffers[i].buffer);
3949 info->tx_holding_buffers[i].buffer = NULL;
3950 }
3951 return -ENOMEM;
3952 }
3953 }
3954
3955 return 0;
3956
3957 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3958
3959 /*
3960 * mgsl_free_intermediate_txbuffer_memory()
3961 *
3962 *
3963 * Arguments:
3964 *
3965 * info pointer to device instance data
3966 *
3967 * Return Value: None
3968 */
3969 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3970 {
3971 int i;
3972
3973 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3974 kfree(info->tx_holding_buffers[i].buffer);
3975 info->tx_holding_buffers[i].buffer = NULL;
3976 }
3977
3978 info->get_tx_holding_index = 0;
3979 info->put_tx_holding_index = 0;
3980 info->tx_holding_count = 0;
3981
3982 } /* end of mgsl_free_intermediate_txbuffer_memory() */
3983
3984
3985 /*
3986 * load_next_tx_holding_buffer()
3987 *
3988 * attempts to load the next buffered tx request into the
3989 * tx dma buffers
3990 *
3991 * Arguments:
3992 *
3993 * info pointer to device instance data
3994 *
3995 * Return Value: true if next buffered tx request loaded
3996 * into adapter's tx dma buffer,
3997 * false otherwise
3998 */
3999 static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
4000 {
4001 bool ret = false;
4002
4003 if ( info->tx_holding_count ) {
4004 /* determine if we have enough tx dma buffers
4005 * to accommodate the next tx frame
4006 */
4007 struct tx_holding_buffer *ptx =
4008 &info->tx_holding_buffers[info->get_tx_holding_index];
4009 int num_free = num_free_tx_dma_buffers(info);
4010 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4011 if ( ptx->buffer_size % DMABUFFERSIZE )
4012 ++num_needed;
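		/* e.g. (illustrative, assuming 4K DMA buffers): a 6000 byte
		 * frame needs two dma buffers, a 4000 byte frame needs one */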
4013
4014 if (num_needed <= num_free) {
4015 info->xmit_cnt = ptx->buffer_size;
4016 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4017
4018 --info->tx_holding_count;
4019 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4020 info->get_tx_holding_index=0;
4021
4022 /* restart transmit timer */
4023 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4024
4025 ret = true;
4026 }
4027 }
4028
4029 return ret;
4030 }
4031
4032 /*
4033 * save_tx_buffer_request()
4034 *
4035 * attempt to store transmit frame request for later transmission
4036 *
4037 * Arguments:
4038 *
4039 * info pointer to device instance data
4040 * Buffer pointer to buffer containing frame to load
4041 * BufferSize size in bytes of frame in Buffer
4042 *
4043 * Return Value: 1 if able to store, 0 otherwise
4044 */
4045 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4046 {
4047 struct tx_holding_buffer *ptx;
4048
4049 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4050 return 0; /* all buffers in use */
4051 }
4052
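	/* The holding buffers form a circular FIFO: frames are stored at
	 * put_tx_holding_index here and later consumed from
	 * get_tx_holding_index by load_next_tx_holding_buffer(). */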
4053 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4054 ptx->buffer_size = BufferSize;
4055 memcpy( ptx->buffer, Buffer, BufferSize);
4056
4057 ++info->tx_holding_count;
4058 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4059 info->put_tx_holding_index=0;
4060
4061 return 1;
4062 }
4063
4064 static int mgsl_claim_resources(struct mgsl_struct *info)
4065 {
4066 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4067 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4068 __FILE__,__LINE__,info->device_name, info->io_base);
4069 return -ENODEV;
4070 }
4071 info->io_addr_requested = true;
4072
4073 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4074 info->device_name, info ) < 0 ) {
4075 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4076 __FILE__,__LINE__,info->device_name, info->irq_level );
4077 goto errout;
4078 }
4079 info->irq_requested = true;
4080
4081 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4082 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4083 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4084 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4085 goto errout;
4086 }
4087 info->shared_mem_requested = true;
4088 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4089 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4090 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4091 goto errout;
4092 }
4093 info->lcr_mem_requested = true;
4094
4095 info->memory_base = ioremap_nocache(info->phys_memory_base,
4096 0x40000);
4097 if (!info->memory_base) {
4098 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4099 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4100 goto errout;
4101 }
4102
4103 if ( !mgsl_memory_test(info) ) {
4104 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4105 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4106 goto errout;
4107 }
4108
4109 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4110 PAGE_SIZE);
4111 if (!info->lcr_base) {
4112 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4113 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4114 goto errout;
4115 }
4116 info->lcr_base += info->lcr_offset;
4117
4118 } else {
4119 /* claim DMA channel */
4120
4121 if (request_dma(info->dma_level,info->device_name) < 0){
4122 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4123 __FILE__,__LINE__,info->device_name, info->dma_level );
4124 mgsl_release_resources( info );
4125 return -ENODEV;
4126 }
4127 info->dma_requested = true;
4128
4129 /* ISA adapter uses bus master DMA */
4130 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4131 enable_dma(info->dma_level);
4132 }
4133
4134 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4135 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4136 __FILE__,__LINE__,info->device_name, info->dma_level );
4137 goto errout;
4138 }
4139
4140 return 0;
4141 errout:
4142 mgsl_release_resources(info);
4143 return -ENODEV;
4144
4145 } /* end of mgsl_claim_resources() */
4146
4147 static void mgsl_release_resources(struct mgsl_struct *info)
4148 {
4149 if ( debug_level >= DEBUG_LEVEL_INFO )
4150 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4151 __FILE__,__LINE__,info->device_name );
4152
4153 if ( info->irq_requested ) {
4154 free_irq(info->irq_level, info);
4155 info->irq_requested = false;
4156 }
4157 if ( info->dma_requested ) {
4158 disable_dma(info->dma_level);
4159 free_dma(info->dma_level);
4160 info->dma_requested = false;
4161 }
4162 mgsl_free_dma_buffers(info);
4163 mgsl_free_intermediate_rxbuffer_memory(info);
4164 mgsl_free_intermediate_txbuffer_memory(info);
4165
4166 if ( info->io_addr_requested ) {
4167 release_region(info->io_base,info->io_addr_size);
4168 info->io_addr_requested = false;
4169 }
4170 if ( info->shared_mem_requested ) {
4171 release_mem_region(info->phys_memory_base,0x40000);
4172 info->shared_mem_requested = false;
4173 }
4174 if ( info->lcr_mem_requested ) {
4175 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4176 info->lcr_mem_requested = false;
4177 }
4178 if (info->memory_base){
4179 iounmap(info->memory_base);
4180 info->memory_base = NULL;
4181 }
4182 if (info->lcr_base){
4183 iounmap(info->lcr_base - info->lcr_offset);
4184 info->lcr_base = NULL;
4185 }
4186
4187 if ( debug_level >= DEBUG_LEVEL_INFO )
4188 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4189 __FILE__,__LINE__,info->device_name );
4190
4191 } /* end of mgsl_release_resources() */
4192
4193 /* mgsl_add_device()
4194 *
4195 * Add the specified device instance data structure to the
4196 * global linked list of devices and increment the device count.
4197 *
4198 * Arguments: info pointer to device instance data
4199 * Return Value: None
4200 */
4201 static void mgsl_add_device( struct mgsl_struct *info )
4202 {
4203 info->next_device = NULL;
4204 info->line = mgsl_device_count;
4205 sprintf(info->device_name,"ttySL%d",info->line);
4206
4207 if (info->line < MAX_TOTAL_DEVICES) {
4208 if (maxframe[info->line])
4209 info->max_frame_size = maxframe[info->line];
4210
4211 if (txdmabufs[info->line]) {
4212 info->num_tx_dma_buffers = txdmabufs[info->line];
4213 if (info->num_tx_dma_buffers < 1)
4214 info->num_tx_dma_buffers = 1;
4215 }
4216
4217 if (txholdbufs[info->line]) {
4218 info->num_tx_holding_buffers = txholdbufs[info->line];
4219 if (info->num_tx_holding_buffers < 1)
4220 info->num_tx_holding_buffers = 1;
4221 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4222 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4223 }
4224 }
4225
4226 mgsl_device_count++;
4227
4228 if ( !mgsl_device_list )
4229 mgsl_device_list = info;
4230 else {
4231 struct mgsl_struct *current_dev = mgsl_device_list;
4232 while( current_dev->next_device )
4233 current_dev = current_dev->next_device;
4234 current_dev->next_device = info;
4235 }
4236
4237 if ( info->max_frame_size < 4096 )
4238 info->max_frame_size = 4096;
4239 else if ( info->max_frame_size > 65535 )
4240 info->max_frame_size = 65535;
4241
4242 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4243 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4244 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4245 info->phys_memory_base, info->phys_lcr_base,
4246 info->max_frame_size );
4247 } else {
4248 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4249 info->device_name, info->io_base, info->irq_level, info->dma_level,
4250 info->max_frame_size );
4251 }
4252
4253 #if SYNCLINK_GENERIC_HDLC
4254 hdlcdev_init(info);
4255 #endif
4256
4257 } /* end of mgsl_add_device() */
4258
4259 static const struct tty_port_operations mgsl_port_ops = {
4260 .carrier_raised = carrier_raised,
4261 .dtr_rts = dtr_rts,
4262 };
4263
4264
4265 /* mgsl_allocate_device()
4266 *
4267 * Allocate and initialize a device instance structure
4268 *
4269 * Arguments: none
4270 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4271 */
4272 static struct mgsl_struct* mgsl_allocate_device(void)
4273 {
4274 struct mgsl_struct *info;
4275
4276 info = kzalloc(sizeof(struct mgsl_struct),
4277 GFP_KERNEL);
4278
4279 if (!info) {
4280 printk("Error can't allocate device instance data\n");
4281 } else {
4282 tty_port_init(&info->port);
4283 info->port.ops = &mgsl_port_ops;
4284 info->magic = MGSL_MAGIC;
4285 INIT_WORK(&info->task, mgsl_bh_handler);
4286 info->max_frame_size = 4096;
4287 info->port.close_delay = 5*HZ/10;
4288 info->port.closing_wait = 30*HZ;
4289 init_waitqueue_head(&info->status_event_wait_q);
4290 init_waitqueue_head(&info->event_wait_q);
4291 spin_lock_init(&info->irq_spinlock);
4292 spin_lock_init(&info->netlock);
4293 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4294 info->idle_mode = HDLC_TXIDLE_FLAGS;
4295 info->num_tx_dma_buffers = 1;
4296 info->num_tx_holding_buffers = 0;
4297 }
4298
4299 return info;
4300
4301 } /* end of mgsl_allocate_device()*/
4302
4303 static const struct tty_operations mgsl_ops = {
4304 .open = mgsl_open,
4305 .close = mgsl_close,
4306 .write = mgsl_write,
4307 .put_char = mgsl_put_char,
4308 .flush_chars = mgsl_flush_chars,
4309 .write_room = mgsl_write_room,
4310 .chars_in_buffer = mgsl_chars_in_buffer,
4311 .flush_buffer = mgsl_flush_buffer,
4312 .ioctl = mgsl_ioctl,
4313 .throttle = mgsl_throttle,
4314 .unthrottle = mgsl_unthrottle,
4315 .send_xchar = mgsl_send_xchar,
4316 .break_ctl = mgsl_break,
4317 .wait_until_sent = mgsl_wait_until_sent,
4318 .set_termios = mgsl_set_termios,
4319 .stop = mgsl_stop,
4320 .start = mgsl_start,
4321 .hangup = mgsl_hangup,
4322 .tiocmget = tiocmget,
4323 .tiocmset = tiocmset,
4324 .get_icount = msgl_get_icount,
4325 .proc_fops = &mgsl_proc_fops,
4326 };
4327
4328 /*
4329 * perform tty device initialization
4330 */
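/*
 * Illustrative only: once registered, the adapters appear as tty devices
 * named after the "ttySL" base name set below (ttySL0, ttySL1, ...), with
 * the major number taken from the ttymajor module parameter.  A user-space
 * program would typically open one as, e.g.:
 *
 *   int fd = open("/dev/ttySL0", O_RDWR | O_NOCTTY);
 */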
4331 static int mgsl_init_tty(void)
4332 {
4333 int rc;
4334
4335 serial_driver = alloc_tty_driver(128);
4336 if (!serial_driver)
4337 return -ENOMEM;
4338
4339 serial_driver->owner = THIS_MODULE;
4340 serial_driver->driver_name = "synclink";
4341 serial_driver->name = "ttySL";
4342 serial_driver->major = ttymajor;
4343 serial_driver->minor_start = 64;
4344 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4345 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4346 serial_driver->init_termios = tty_std_termios;
4347 serial_driver->init_termios.c_cflag =
4348 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4349 serial_driver->init_termios.c_ispeed = 9600;
4350 serial_driver->init_termios.c_ospeed = 9600;
4351 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4352 tty_set_operations(serial_driver, &mgsl_ops);
4353 if ((rc = tty_register_driver(serial_driver)) < 0) {
4354 printk("%s(%d):Couldn't register serial driver\n",
4355 __FILE__,__LINE__);
4356 put_tty_driver(serial_driver);
4357 serial_driver = NULL;
4358 return rc;
4359 }
4360
4361 printk("%s %s, tty major#%d\n",
4362 driver_name, driver_version,
4363 serial_driver->major);
4364 return 0;
4365 }
4366
4367 /* enumerate user specified ISA adapters
4368 */
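/*
 * Illustrative only (assuming io, irq and dma are module parameters, as
 * MAX_ISA_DEVICES suggests): an ISA adapter might be specified at load
 * time with something like:
 *
 *   modprobe synclink io=0x280 irq=10 dma=7
 */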
4369 static void mgsl_enum_isa_devices(void)
4370 {
4371 struct mgsl_struct *info;
4372 int i;
4373
4374 /* Check for user specified ISA devices */
4375
4376 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4377 if ( debug_level >= DEBUG_LEVEL_INFO )
4378 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4379 io[i], irq[i], dma[i] );
4380
4381 info = mgsl_allocate_device();
4382 if ( !info ) {
4383 /* error allocating device instance data */
4384 if ( debug_level >= DEBUG_LEVEL_ERROR )
4385 printk( "can't allocate device instance data.\n");
4386 continue;
4387 }
4388
4389 /* Copy user configuration info to device instance data */
4390 info->io_base = (unsigned int)io[i];
4391 info->irq_level = (unsigned int)irq[i];
4392 info->irq_level = irq_canonicalize(info->irq_level);
4393 info->dma_level = (unsigned int)dma[i];
4394 info->bus_type = MGSL_BUS_TYPE_ISA;
4395 info->io_addr_size = 16;
4396 info->irq_flags = 0;
4397
4398 mgsl_add_device( info );
4399 }
4400 }
4401
4402 static void synclink_cleanup(void)
4403 {
4404 int rc;
4405 struct mgsl_struct *info;
4406 struct mgsl_struct *tmp;
4407
4408 printk("Unloading %s: %s\n", driver_name, driver_version);
4409
4410 if (serial_driver) {
4411 if ((rc = tty_unregister_driver(serial_driver)))
4412 printk("%s(%d) failed to unregister tty driver err=%d\n",
4413 __FILE__,__LINE__,rc);
4414 put_tty_driver(serial_driver);
4415 }
4416
4417 info = mgsl_device_list;
4418 while(info) {
4419 #if SYNCLINK_GENERIC_HDLC
4420 hdlcdev_exit(info);
4421 #endif
4422 mgsl_release_resources(info);
4423 tmp = info;
4424 info = info->next_device;
4425 kfree(tmp);
4426 }
4427
4428 if (pci_registered)
4429 pci_unregister_driver(&synclink_pci_driver);
4430 }
4431
4432 static int __init synclink_init(void)
4433 {
4434 int rc;
4435
4436 if (break_on_load) {
4437 mgsl_get_text_ptr();
4438 BREAKPOINT();
4439 }
4440
4441 printk("%s %s\n", driver_name, driver_version);
4442
4443 mgsl_enum_isa_devices();
4444 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4445 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4446 else
4447 pci_registered = true;
4448
4449 if ((rc = mgsl_init_tty()) < 0)
4450 goto error;
4451
4452 return 0;
4453
4454 error:
4455 synclink_cleanup();
4456 return rc;
4457 }
4458
4459 static void __exit synclink_exit(void)
4460 {
4461 synclink_cleanup();
4462 }
4463
4464 module_init(synclink_init);
4465 module_exit(synclink_exit);
4466
4467 /*
4468 * usc_RTCmd()
4469 *
4470 * Issue a USC Receive/Transmit command to the
4471 * Channel Command/Address Register (CCAR).
4472 *
4473 * Notes:
4474 *
4475 * The command is encoded in the most significant 5 bits <15..11>
4476 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4477 * and Bits <6..0> must be written as zeros.
4478 *
4479 * Arguments:
4480 *
4481 * info pointer to device information structure
4482 * Cmd command mask (use symbolic macros)
4483 *
4484 * Return Value:
4485 *
4486 * None
4487 */
4488 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4489 {
4490 /* output command to CCAR in bits <15..11> */
4491 /* preserve bits <10..7>, bits <6..0> must be zero */
4492
4493 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4494
4495 /* Read to flush write to CCAR */
4496 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4497 inw( info->io_base + CCAR );
4498
4499 } /* end of usc_RTCmd() */
4500
4501 /*
4502 * usc_DmaCmd()
4503 *
4504 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4505 *
4506 * Arguments:
4507 *
4508 * info pointer to device information structure
4509 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4510 *
4511 * Return Value:
4512 *
4513 * None
4514 */
4515 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4516 {
4517 /* write command mask to DCAR */
4518 outw( Cmd + info->mbre_bit, info->io_base );
4519
4520 /* Read to flush write to DCAR */
4521 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4522 inw( info->io_base );
4523
4524 } /* end of usc_DmaCmd() */
4525
4526 /*
4527 * usc_OutDmaReg()
4528 *
4529 * Write a 16-bit value to a USC DMA register
4530 *
4531 * Arguments:
4532 *
4533 * info pointer to device info structure
4534 * RegAddr register address (number) for write
4535 * RegValue 16-bit value to write to register
4536 *
4537 * Return Value:
4538 *
4539 * None
4540 *
4541 */
4542 static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4543 {
4544 /* Note: The DCAR is located at the adapter base address */
4545 /* Note: must preserve state of BIT8 in DCAR */
4546
4547 outw( RegAddr + info->mbre_bit, info->io_base );
4548 outw( RegValue, info->io_base );
4549
4550 /* Read to flush write to DCAR */
4551 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4552 inw( info->io_base );
4553
4554 } /* end of usc_OutDmaReg() */
4555
4556 /*
4557 * usc_InDmaReg()
4558 *
4559 * Read a 16-bit value from a DMA register
4560 *
4561 * Arguments:
4562 *
4563 * info pointer to device info structure
4564 * RegAddr register address (number) to read from
4565 *
4566 * Return Value:
4567 *
4568 * The 16-bit value read from register
4569 *
4570 */
4571 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4572 {
4573 /* Note: The DCAR is located at the adapter base address */
4574 /* Note: must preserve state of BIT8 in DCAR */
4575
4576 outw( RegAddr + info->mbre_bit, info->io_base );
4577 return inw( info->io_base );
4578
4579 } /* end of usc_InDmaReg() */
4580
4581 /*
4582 *
4583 * usc_OutReg()
4584 *
4585 * Write a 16-bit value to a USC serial channel register
4586 *
4587 * Arguments:
4588 *
4589 * info pointer to device info structure
4590 * RegAddr register address (number) to write to
4591 * RegValue 16-bit value to write to register
4592 *
4593 * Return Value:
4594 *
4595 * None
4596 *
4597 */
4598 static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4599 {
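	/* Indirect register access: write the register address to CCAR first
	 * (keeping the loopback bits that live in the preserved CCAR field),
	 * then write the data word to the same I/O location. */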
4600 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4601 outw( RegValue, info->io_base + CCAR );
4602
4603 /* Read to flush write to CCAR */
4604 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4605 inw( info->io_base + CCAR );
4606
4607 } /* end of usc_OutReg() */
4608
4609 /*
4610 * usc_InReg()
4611 *
4612 * Reads a 16-bit value from a USC serial channel register
4613 *
4614 * Arguments:
4615 *
4616 * info pointer to device extension
4617 * RegAddr register address (number) to read from
4618 *
4619 * Return Value:
4620 *
4621 * 16-bit value read from register
4622 */
4623 static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4624 {
4625 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4626 return inw( info->io_base + CCAR );
4627
4628 } /* end of usc_InReg() */
4629
4630 /* usc_set_sdlc_mode()
4631 *
4632 * Set up the adapter for SDLC DMA communications.
4633 *
4634 * Arguments: info pointer to device instance data
4635 * Return Value: NONE
4636 */
4637 static void usc_set_sdlc_mode( struct mgsl_struct *info )
4638 {
4639 u16 RegValue;
4640 bool PreSL1660;
4641
4642 /*
4643 * determine if the IUSC on the adapter is pre-SL1660. If
4644 * not, take advantage of the UnderWait feature of more
4645 * modern chips. If an underrun occurs and this bit is set,
4646 * the transmitter will idle the programmed idle pattern
4647 * until the driver has time to service the underrun. Otherwise,
4648 * the dma controller may get the cycles previously requested
4649 * and begin transmitting queued tx data.
4650 */
4651 usc_OutReg(info,TMCR,0x1f);
4652 RegValue=usc_InReg(info,TMDR);
4653 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4654
4655 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4656 {
4657 /*
4658 ** Channel Mode Register (CMR)
4659 **
4660 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4661 ** <13> 0 0 = Transmit Disabled (initially)
4662 ** <12> 0 1 = Consecutive Idles share common 0
4663 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4664 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4665 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4666 **
4667 ** 1000 1110 0000 0110 = 0x8e06
4668 */
4669 RegValue = 0x8e06;
4670
4671 /*--------------------------------------------------
4672 * ignore user options for UnderRun Actions and
4673 * preambles
4674 *--------------------------------------------------*/
4675 }
4676 else
4677 {
4678 /* Channel mode Register (CMR)
4679 *
4680 * <15..14> 00 Tx Sub modes, Underrun Action
4681 * <13> 0 1 = Send Preamble before opening flag
4682 * <12> 0 1 = Consecutive Idles share common 0
4683 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4684 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4685 * <3..0> 0110 Receiver mode = HDLC/SDLC
4686 *
4687 * 0000 0110 0000 0110 = 0x0606
4688 */
4689 if (info->params.mode == MGSL_MODE_RAW) {
4690 RegValue = 0x0001; /* Set Receive mode = external sync */
4691
4692 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4693 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4694
4695 /*
4696 * TxSubMode:
4697 * CMR <15> 0 Don't send CRC on Tx Underrun
4698 * CMR <14> x undefined
4699 * CMR <13> 0 Send preamble before opening sync
4700 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4701 *
4702 * TxMode:
4703 * CMR <11..8> 0100 MonoSync
4704 *
4705 * 0000 0100 xxxx xxxx = 0x04xx
4706 */
4707 RegValue |= 0x0400;
4708 }
4709 else {
4710
4711 RegValue = 0x0606;
4712
4713 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4714 RegValue |= BIT14;
4715 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4716 RegValue |= BIT15;
4717 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4718 RegValue |= BIT15 + BIT14;
4719 }
4720
4721 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4722 RegValue |= BIT13;
4723 }
4724
4725 if ( info->params.mode == MGSL_MODE_HDLC &&
4726 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4727 RegValue |= BIT12;
4728
4729 if ( info->params.addr_filter != 0xff )
4730 {
4731 /* set up receive address filtering */
4732 usc_OutReg( info, RSR, info->params.addr_filter );
4733 RegValue |= BIT4;
4734 }
4735
4736 usc_OutReg( info, CMR, RegValue );
4737 info->cmr_value = RegValue;
4738
4739 /* Receiver mode Register (RMR)
4740 *
4741 * <15..13> 000 encoding
4742 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4743 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4744 * <9> 0 1 = Include Receive chars in CRC
4745 * <8> 1 1 = Use Abort/PE bit as abort indicator
4746 * <7..6> 00 Even parity
4747 * <5> 0 parity disabled
4748 * <4..2> 000 Receive Char Length = 8 bits
4749 * <1..0> 00 Disable Receiver
4750 *
4751 * 0000 0101 0000 0000 = 0x0500
4752 */
4753
4754 RegValue = 0x0500;
4755
4756 switch ( info->params.encoding ) {
4757 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4758 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4759 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4760 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4761 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4762 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4763 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4764 }
4765
4766 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4767 RegValue |= BIT9;
4768 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4769 RegValue |= ( BIT12 | BIT10 | BIT9 );
4770
4771 usc_OutReg( info, RMR, RegValue );
4772
4773 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4774 /* When an opening flag of an SDLC frame is recognized the */
4775 /* Receive Character count (RCC) is loaded with the value in */
4776 /* RCLR. The RCC is decremented for each received byte. The */
4777 /* value of RCC is stored after the closing flag of the frame */
4778 /* allowing the frame size to be computed. */
4779
4780 usc_OutReg( info, RCLR, RCLRVALUE );
4781
4782 usc_RCmd( info, RCmd_SelectRicrdma_level );
4783
4784 /* Receive Interrupt Control Register (RICR)
4785 *
4786 * <15..8> ? RxFIFO DMA Request Level
4787 * <7> 0 Exited Hunt IA (Interrupt Arm)
4788 * <6> 0 Idle Received IA
4789 * <5> 0 Break/Abort IA
4790 * <4> 0 Rx Bound IA
4791 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4792 * <2> 0 Abort/PE IA
4793 * <1> 1 Rx Overrun IA
4794 * <0> 0 Select TC0 value for readback
4795 *
4796 * 0000 0000 0000 1010 = 0x000a
4797 */
4798
4799 /* Carry over the Exit Hunt and Idle Received bits */
4800 /* in case they have been armed by usc_ArmEvents. */
4801
4802 RegValue = usc_InReg( info, RICR ) & 0xc0;
4803
4804 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4805 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4806 else
4807 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4808
4809 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4810
4811 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4812 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4813
4814 /* Transmit mode Register (TMR)
4815 *
4816 * <15..13> 000 encoding
4817 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4818 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4819 * <9> 0 1 = Tx CRC Enabled
4820 * <8> 0 1 = Append CRC to end of transmit frame
4821 * <7..6> 00 Transmit parity Even
4822 * <5> 0 Transmit parity Disabled
4823 * <4..2> 000 Tx Char Length = 8 bits
4824 * <1..0> 00 Disable Transmitter
4825 *
4826 * 0000 0100 0000 0000 = 0x0400
4827 */
4828
4829 RegValue = 0x0400;
4830
4831 switch ( info->params.encoding ) {
4832 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4833 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4834 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4835 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4836 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4837 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4838 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4839 }
4840
4841 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4842 RegValue |= BIT9 + BIT8;
4843 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4844 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4845
4846 usc_OutReg( info, TMR, RegValue );
4847
4848 usc_set_txidle( info );
4849
4850
4851 usc_TCmd( info, TCmd_SelectTicrdma_level );
4852
4853 /* Transmit Interrupt Control Register (TICR)
4854 *
4855 * <15..8> ? Transmit FIFO DMA Level
4856 * <7> 0 Present IA (Interrupt Arm)
4857 * <6> 0 Idle Sent IA
4858 * <5> 1 Abort Sent IA
4859 * <4> 1 EOF/EOM Sent IA
4860 * <3> 0 CRC Sent IA
4861 * <2> 1 1 = Wait for SW Trigger to Start Frame
4862 * <1> 1 Tx Underrun IA
4863 * <0> 0 TC0 constant on read back
4864 *
4865 * 0000 0000 0011 0110 = 0x0036
4866 */
4867
4868 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4869 usc_OutReg( info, TICR, 0x0736 );
4870 else
4871 usc_OutReg( info, TICR, 0x1436 );
4872
4873 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4874 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4875
4876 /*
4877 ** Transmit Command/Status Register (TCSR)
4878 **
4879 ** <15..12> 0000 TCmd
4880 ** <11> 0/1 UnderWait
4881 ** <10..08> 000 TxIdle
4882 ** <7> x PreSent
4883 ** <6> x IdleSent
4884 ** <5> x AbortSent
4885 ** <4> x EOF/EOM Sent
4886 ** <3> x CRC Sent
4887 ** <2> x All Sent
4888 ** <1> x TxUnder
4889 ** <0> x TxEmpty
4890 **
4891 ** 0000 0000 0000 0000 = 0x0000
4892 */
4893 info->tcsr_value = 0;
4894
4895 if ( !PreSL1660 )
4896 info->tcsr_value |= TCSR_UNDERWAIT;
4897
4898 usc_OutReg( info, TCSR, info->tcsr_value );
4899
4900 /* Clock mode Control Register (CMCR)
4901 *
4902 * <15..14> 00 counter 1 Source = Disabled
4903 * <13..12> 00 counter 0 Source = Disabled
4904 * <11..10> 11 BRG1 Input is TxC Pin
4905 * <9..8> 11 BRG0 Input is TxC Pin
4906 * <7..6> 01 DPLL Input is BRG1 Output
4907 * <5..3> XXX TxCLK comes from Port 0
4908 * <2..0> XXX RxCLK comes from Port 1
4909 *
4910 * 0000 1111 0111 0111 = 0x0f77
4911 */
4912
4913 RegValue = 0x0f40;
4914
4915 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4916 RegValue |= 0x0003; /* RxCLK from DPLL */
4917 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4918 RegValue |= 0x0004; /* RxCLK from BRG0 */
4919 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4920 RegValue |= 0x0006; /* RxCLK from TXC Input */
4921 else
4922 RegValue |= 0x0007; /* RxCLK from Port1 */
4923
4924 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4925 RegValue |= 0x0018; /* TxCLK from DPLL */
4926 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4927 RegValue |= 0x0020; /* TxCLK from BRG0 */
4928 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4929 RegValue |= 0x0038; /* TxCLK from RxC Input */
4930 else
4931 RegValue |= 0x0030; /* TxCLK from Port0 */
4932
4933 usc_OutReg( info, CMCR, RegValue );
4934
4935
4936 /* Hardware Configuration Register (HCR)
4937 *
4938 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4939 * <13> 0 CTR1DSel:0=CTR0Div determines CTR1Div
4940 * <12> 0 CVOK:0=report code violation in biphase
4941 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4942 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4943 * <7..6> 00 reserved
4944 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4945 * <4> X BRG1 Enable
4946 * <3..2> 00 reserved
4947 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4948 * <0> 0 BRG0 Enable
4949 */
4950
4951 RegValue = 0x0000;
4952
4953 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
4954 u32 XtalSpeed;
4955 u32 DpllDivisor;
4956 u16 Tc;
4957
4958 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4959 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4960
4961 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4962 XtalSpeed = 11059200;
4963 else
4964 XtalSpeed = 14745600;
4965
4966 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4967 DpllDivisor = 16;
4968 RegValue |= BIT10;
4969 }
4970 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4971 DpllDivisor = 8;
4972 RegValue |= BIT11;
4973 }
4974 else
4975 DpllDivisor = 32;
4976
4977 /* Tc = (Xtal/Speed) - 1 */
4978 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4979 /* then rounding up gives a more precise time constant. Instead */
4980 /* of rounding up and then subtracting 1 we just don't subtract */
4981 /* the one in this case. */
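	/*--------------------------------------------------
	 * Worked example (illustrative): a 14745600 Hz XtalSpeed with
	 * DpllDivisor 16 gives a 921600 Hz reference; for clock_speed
	 * 38400 the division is exact (921600/38400 = 24, remainder 0),
	 * so Tc is decremented to 23.
	 *--------------------------------------------------*/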
4982
4983 /*--------------------------------------------------
4984 * ejz: for DPLL mode, application should use the
4985 * same clock speed as the partner system, even
4986 * though clocking is derived from the input RxData.
4987 * In case the user uses a 0 for the clock speed,
4988 * default to 0xffffffff and don't try to divide by
4989 * zero
4990 *--------------------------------------------------*/
4991 if ( info->params.clock_speed )
4992 {
4993 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4994 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4995 / info->params.clock_speed) )
4996 Tc--;
4997 }
4998 else
4999 Tc = -1;
5000
5001
5002 /* Write 16-bit Time Constant for BRG1 */
5003 usc_OutReg( info, TC1R, Tc );
5004
5005 RegValue |= BIT4; /* enable BRG1 */
5006
5007 switch ( info->params.encoding ) {
5008 case HDLC_ENCODING_NRZ:
5009 case HDLC_ENCODING_NRZB:
5010 case HDLC_ENCODING_NRZI_MARK:
5011 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5012 case HDLC_ENCODING_BIPHASE_MARK:
5013 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5014 case HDLC_ENCODING_BIPHASE_LEVEL:
5015 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5016 }
5017 }
5018
5019 usc_OutReg( info, HCR, RegValue );
5020
5021
5022 /* Channel Control/status Register (CCSR)
5023 *
5024 * <15> X RCC FIFO Overflow status (RO)
5025 * <14> X RCC FIFO Not Empty status (RO)
5026 * <13> 0 1 = Clear RCC FIFO (WO)
5027 * <12> X DPLL Sync (RW)
5028 * <11> X DPLL 2 Missed Clocks status (RO)
5029 * <10> X DPLL 1 Missed Clock status (RO)
5030 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5031 * <7> X SDLC Loop On status (RO)
5032 * <6> X SDLC Loop Send status (RO)
5033 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5034 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5035 * <1..0> 00 reserved
5036 *
5037 * 0000 0000 0010 0000 = 0x0020
5038 */
5039
5040 usc_OutReg( info, CCSR, 0x1020 );
5041
5042
5043 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5044 usc_OutReg( info, SICR,
5045 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5046 }
5047
5048
5049 /* enable Master Interrupt Enable bit (MIE) */
5050 usc_EnableMasterIrqBit( info );
5051
5052 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5053 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5054
5055 /* arm RCC underflow interrupt */
5056 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5057 usc_EnableInterrupts(info, MISC);
5058
5059 info->mbre_bit = 0;
5060 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5061 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5062 info->mbre_bit = BIT8;
5063 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5064
5065 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5066 /* Enable DMAEN (Port 7, Bit 14) */
5067 /* This connects the DMA request signal to the ISA bus */
5068 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5069 }
5070
5071 /* DMA Control Register (DCR)
5072 *
5073 * <15..14> 10 Priority mode = Alternating Tx/Rx
5074 * 01 Rx has priority
5075 * 00 Tx has priority
5076 *
5077 * <13> 1 Enable Priority Preempt per DCR<15..14>
5078 * (WARNING DCR<11..10> must be 00 when this is 1)
5079 * 0 Choose activate channel per DCR<11..10>
5080 *
5081 * <12> 0 Little Endian for Array/List
5082 * <11..10> 00 Both Channels can use each bus grant
5083 * <9..6> 0000 reserved
5084 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5085 * <4> 0 1 = drive D/C and S/D pins
5086 * <3> 1 1 = Add one wait state to all DMA cycles.
5087 * <2> 0 1 = Strobe /UAS on every transfer.
5088 * <1..0> 11 Addr incrementing only affects LS24 bits
5089 *
5090 * 0110 0000 0000 1011 = 0x600b
5091 */
5092
5093 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5094 /* PCI adapter does not need DMA wait state */
5095 usc_OutDmaReg( info, DCR, 0xa00b );
5096 }
5097 else
5098 usc_OutDmaReg( info, DCR, 0x800b );
5099
5100
5101 /* Receive DMA mode Register (RDMR)
5102 *
5103 * <15..14> 11 DMA mode = Linked List Buffer mode
5104 * <13> 1 RSBinA/L = store Rx status Block in Array/List entry
5105 * <12> 1 Clear count of List Entry after fetching
5106 * <11..10> 00 Address mode = Increment
5107 * <9> 1 Terminate Buffer on RxBound
5108 * <8> 0 Bus Width = 16bits
5109 * <7..0> ? status Bits (write as 0s)
5110 *
5111 * 1111 0010 0000 0000 = 0xf200
5112 */
5113
5114 usc_OutDmaReg( info, RDMR, 0xf200 );
5115
5116
5117 /* Transmit DMA mode Register (TDMR)
5118 *
5119 * <15..14> 11 DMA mode = Linked List Buffer mode
5120 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5121 * <12> 1 Clear count of List Entry after fetching
5122 * <11..10> 00 Address mode = Increment
5123 * <9> 1 Terminate Buffer on end of frame
5124 * <8> 0 Bus Width = 16bits
5125 * <7..0> ? status Bits (Read Only so write as 0)
5126 *
5127 * 1111 0010 0000 0000 = 0xf200
5128 */
5129
5130 usc_OutDmaReg( info, TDMR, 0xf200 );
5131
5132
5133 /* DMA Interrupt Control Register (DICR)
5134 *
5135 * <15> 1 DMA Interrupt Enable
5136 * <14> 0 1 = Disable IEO from USC
5137 * <13> 0 1 = Don't provide vector during IntAck
5138 * <12> 1 1 = Include status in Vector
5139 * <10..2> 0 reserved, Must be 0s
5140 * <1> 0 1 = Rx DMA Interrupt Enabled
5141 * <0> 0 1 = Tx DMA Interrupt Enabled
5142 *
5143 * 1001 0000 0000 0000 = 0x9000
5144 */
5145
5146 usc_OutDmaReg( info, DICR, 0x9000 );
5147
5148 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5149 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5150 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5151
5152 /* Channel Control Register (CCR)
5153 *
5154 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5155 * <13> 0 Trigger Tx on SW Command Disabled
5156 * <12> 0 Flag Preamble Disabled
5157 * <11..10> 00 Preamble Length
5158 * <9..8> 00 Preamble Pattern
5159 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5160 * <5> 0 Trigger Rx on SW Command Disabled
5161 * <4..0> 0 reserved
5162 *
5163 * 1000 0000 1000 0000 = 0x8080
5164 */
5165
5166 RegValue = 0x8080;
5167
5168 switch ( info->params.preamble_length ) {
5169 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5170 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5171 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5172 }
5173
5174 switch ( info->params.preamble ) {
5175 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5176 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5177 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5178 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5179 }
5180
5181 usc_OutReg( info, CCR, RegValue );
5182
5183
5184 /*
5185 * Burst/Dwell Control Register
5186 *
5187 * <15..8> 0x20 Maximum number of transfers per bus grant
5188 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5189 */
5190
5191 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5192 /* don't limit bus occupancy on PCI adapter */
5193 usc_OutDmaReg( info, BDCR, 0x0000 );
5194 }
5195 else
5196 usc_OutDmaReg( info, BDCR, 0x2000 );
5197
5198 usc_stop_transmitter(info);
5199 usc_stop_receiver(info);
5200
5201 } /* end of usc_set_sdlc_mode() */
5202
5203 /* usc_enable_loopback()
5204 *
5205 * Set the 16C32 for internal loopback mode.
5206 * The TxCLK and RxCLK signals are generated from the BRG0 and
5207 * the TxD is looped back to the RxD internally.
5208 *
5209 * Arguments: info pointer to device instance data
5210 * enable 1 = enable loopback, 0 = disable
5211 * Return Value: None
5212 */
5213 static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5214 {
5215 if (enable) {
5216 /* blank external TXD output */
5217 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5218
5219 /* Clock mode Control Register (CMCR)
5220 *
5221 * <15..14> 00 counter 1 Disabled
5222 * <13..12> 00 counter 0 Disabled
5223 * <11..10> 11 BRG1 Input is TxC Pin
5224 * <9..8> 11 BRG0 Input is TxC Pin
5225 * <7..6> 01 DPLL Input is BRG1 Output
5226 * <5..3> 100 TxCLK comes from BRG0
5227 * <2..0> 100 RxCLK comes from BRG0
5228 *
5229 * 0000 1111 0110 0100 = 0x0f64
5230 */
5231
5232 usc_OutReg( info, CMCR, 0x0f64 );
5233
5234 /* Write 16-bit Time Constant for BRG0 */
5235 /* use clock speed if available, otherwise use 8 for diagnostics */
5236 if (info->params.clock_speed) {
5237 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5238 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5239 else
5240 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5241 } else
5242 usc_OutReg(info, TC0R, (u16)8);
5243
5244 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5245 mode = Continuous Set Bit 0 to enable BRG0. */
5246 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5247
5248 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5249 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5250
5251 /* set Internal Data loopback mode */
5252 info->loopback_bits = 0x300;
5253 outw( 0x0300, info->io_base + CCAR );
5254 } else {
5255 /* enable external TXD output */
5256 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5257
5258 /* clear Internal Data loopback mode */
5259 info->loopback_bits = 0;
5260 outw( 0,info->io_base + CCAR );
5261 }
5262
5263 } /* end of usc_enable_loopback() */
5264
5265 /* usc_enable_aux_clock()
5266 *
5267 * Enable the AUX clock output at the specified frequency.
5268 *
5269 * Arguments:
5270 *
5271 * info pointer to device extension
5272 * data_rate data rate of clock in bits per second
5273 * A data rate of 0 disables the AUX clock.
5274 *
5275 * Return Value: None
5276 */
5277 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5278 {
5279 u32 XtalSpeed;
5280 u16 Tc;
5281
5282 if ( data_rate ) {
5283 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5284 XtalSpeed = 11059200;
5285 else
5286 XtalSpeed = 14745600;
5287
5288
5289 /* Tc = (Xtal/Speed) - 1 */
5290 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5291 /* then rounding up gives a more precise time constant. Instead */
5292 /* of rounding up and then subtracting 1 we just don't subtract */
5293 /* the one in this case. */
5294
5295
5296 Tc = (u16)(XtalSpeed/data_rate);
5297 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5298 Tc--;
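	/*
	 * Worked example with illustrative values only (the rates here are
	 * made up for the example, not taken from the driver): with the ISA
	 * crystal of 14745600 Hz and a data_rate of 250000 bps, Xtal/rate =
	 * 58 remainder 245600. Twice the remainder exceeds the rate, so the
	 * decrement is skipped and Tc = 58, the same as
	 * round(14745600/250000) - 1. For an exact divisor such as 38400 bps,
	 * 14745600/38400 = 384 and Tc = 384 - 1 = 383.
	 */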
5299
5300 /* Write 16-bit Time Constant for BRG0 */
5301 usc_OutReg( info, TC0R, Tc );
5302
5303 /*
5304 * Hardware Configuration Register (HCR)
5305 * Clear Bit 1, BRG0 mode = Continuous
5306 * Set Bit 0 to enable BRG0.
5307 */
5308
5309 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5310
5311 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5312 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5313 } else {
5314 /* data rate == 0 so turn off BRG0 */
5315 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5316 }
5317
5318 } /* end of usc_enable_aux_clock() */
5319
5320 /*
5321 *
5322 * usc_process_rxoverrun_sync()
5323 *
5324 * This function processes a receive overrun by resetting the
5325 * receive DMA buffers and issuing a Purge Rx FIFO command
5326 * to allow the receiver to continue receiving.
5327 *
5328 * Arguments:
5329 *
5330 * info pointer to device extension
5331 *
5332 * Return Value: None
5333 */
5334 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5335 {
5336 int start_index;
5337 int end_index;
5338 int frame_start_index;
5339 bool start_of_frame_found = false;
5340 bool end_of_frame_found = false;
5341 bool reprogram_dma = false;
5342
5343 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5344 u32 phys_addr;
5345
5346 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5347 usc_RCmd( info, RCmd_EnterHuntmode );
5348 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5349
5350 /* CurrentRxBuffer points to the 1st buffer of the next */
5351 /* possibly available receive frame. */
5352
5353 frame_start_index = start_index = end_index = info->current_rx_buffer;
5354
5355 /* Search for an unfinished string of buffers. This means */
5356 /* that a receive frame started (at least one buffer with */
5357 * count set to zero) but there is no terminating buffer
5358 /* (status set to non-zero). */
5359
5360 while( !buffer_list[end_index].count )
5361 {
5362 /* Count field has been reset to zero by 16C32. */
5363 /* This buffer is currently in use. */
5364
5365 if ( !start_of_frame_found )
5366 {
5367 start_of_frame_found = true;
5368 frame_start_index = end_index;
5369 end_of_frame_found = false;
5370 }
5371
5372 if ( buffer_list[end_index].status )
5373 {
5374 /* Status field has been set by 16C32. */
5375 /* This is the last buffer of a received frame. */
5376
5377 /* We want to leave the buffers for this frame intact. */
5378 /* Move on to next possible frame. */
5379
5380 start_of_frame_found = false;
5381 end_of_frame_found = true;
5382 }
5383
5384 /* advance to next buffer entry in linked list */
5385 end_index++;
5386 if ( end_index == info->rx_buffer_count )
5387 end_index = 0;
5388
5389 if ( start_index == end_index )
5390 {
5391 /* The entire list has been searched with all Counts == 0 and */
5392 /* all Status == 0. The receive buffers are */
5393 /* completely screwed, reset all receive buffers! */
5394 mgsl_reset_rx_dma_buffers( info );
5395 frame_start_index = 0;
5396 start_of_frame_found = false;
5397 reprogram_dma = true;
5398 break;
5399 }
5400 }
5401
5402 if ( start_of_frame_found && !end_of_frame_found )
5403 {
5404 /* There is an unfinished string of receive DMA buffers */
5405 /* as a result of the receiver overrun. */
5406
5407 /* Reset the buffers for the unfinished frame */
5408 /* and reprogram the receive DMA controller to start */
5409 /* at the 1st buffer of unfinished frame. */
5410
5411 start_index = frame_start_index;
5412
5413 do
5414 {
5415 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5416
5417 /* Adjust index for wrap around. */
5418 if ( start_index == info->rx_buffer_count )
5419 start_index = 0;
5420
5421 } while( start_index != end_index );
5422
5423 reprogram_dma = true;
5424 }
5425
5426 if ( reprogram_dma )
5427 {
5428 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5429 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5430 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5431
5432 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5433
5434 /* This empties the receive FIFO and loads the RCC with RCLR */
5435 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5436
5437 /* program 16C32 with physical address of 1st DMA buffer entry */
5438 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5439 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5440 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5441
5442 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5443 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5444 usc_EnableInterrupts( info, RECEIVE_STATUS );
5445
5446 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5447 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5448
5449 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5450 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5451 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5452 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5453 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5454 else
5455 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5456 }
5457 else
5458 {
5459 /* This empties the receive FIFO and loads the RCC with RCLR */
5460 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5461 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5462 }
5463
5464 } /* end of usc_process_rxoverrun_sync() */
5465
5466 /* usc_stop_receiver()
5467 *
5468 * Disable USC receiver
5469 *
5470 * Arguments: info pointer to device instance data
5471 * Return Value: None
5472 */
5473 static void usc_stop_receiver( struct mgsl_struct *info )
5474 {
5475 if (debug_level >= DEBUG_LEVEL_ISR)
5476 printk("%s(%d):usc_stop_receiver(%s)\n",
5477 __FILE__,__LINE__, info->device_name );
5478
5479 /* Disable receive DMA channel. */
5480 /* This also disables receive DMA channel interrupts */
5481 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5482
5483 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5484 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5485 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5486
5487 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5488
5489 /* This empties the receive FIFO and loads the RCC with RCLR */
5490 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5491 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5492
5493 info->rx_enabled = false;
5494 info->rx_overflow = false;
5495 info->rx_rcc_underrun = false;
5496
5497 } /* end of usc_stop_receiver() */
5498
5499 /* usc_start_receiver()
5500 *
5501 * Enable the USC receiver
5502 *
5503 * Arguments: info pointer to device instance data
5504 * Return Value: None
5505 */
5506 static void usc_start_receiver( struct mgsl_struct *info )
5507 {
5508 u32 phys_addr;
5509
5510 if (debug_level >= DEBUG_LEVEL_ISR)
5511 printk("%s(%d):usc_start_receiver(%s)\n",
5512 __FILE__,__LINE__, info->device_name );
5513
5514 mgsl_reset_rx_dma_buffers( info );
5515 usc_stop_receiver( info );
5516
5517 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5518 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5519
5520 if ( info->params.mode == MGSL_MODE_HDLC ||
5521 info->params.mode == MGSL_MODE_RAW ) {
5522 /* DMA mode Transfers */
5523 /* Program the DMA controller. */
5524 /* Enable the DMA controller end of buffer interrupt. */
5525
5526 /* program 16C32 with physical address of 1st DMA buffer entry */
5527 phys_addr = info->rx_buffer_list[0].phys_entry;
5528 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5529 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5530
5531 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5532 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5533 usc_EnableInterrupts( info, RECEIVE_STATUS );
5534
5535 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5536 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5537
5538 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5539 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5540 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5541 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5542 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5543 else
5544 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5545 } else {
5546 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5547 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5548 usc_EnableInterrupts(info, RECEIVE_DATA);
5549
5550 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5551 usc_RCmd( info, RCmd_EnterHuntmode );
5552
5553 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5554 }
5555
5556 usc_OutReg( info, CCSR, 0x1020 );
5557
5558 info->rx_enabled = true;
5559
5560 } /* end of usc_start_receiver() */
5561
5562 /* usc_start_transmitter()
5563 *
5564 * Enable the USC transmitter and send a transmit frame if
5565 * one is loaded in the DMA buffers.
5566 *
5567 * Arguments: info pointer to device instance data
5568 * Return Value: None
5569 */
5570 static void usc_start_transmitter( struct mgsl_struct *info )
5571 {
5572 u32 phys_addr;
5573 unsigned int FrameSize;
5574
5575 if (debug_level >= DEBUG_LEVEL_ISR)
5576 printk("%s(%d):usc_start_transmitter(%s)\n",
5577 __FILE__,__LINE__, info->device_name );
5578
5579 if ( info->xmit_cnt ) {
5580
5581 /* If auto RTS enabled and RTS is inactive, then assert */
5582 /* RTS and set a flag indicating that the driver should */
5583 /* negate RTS when the transmission completes. */
5584
5585 info->drop_rts_on_tx_done = false;
5586
5587 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5588 usc_get_serial_signals( info );
5589 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5590 info->serial_signals |= SerialSignal_RTS;
5591 usc_set_serial_signals( info );
5592 info->drop_rts_on_tx_done = true;
5593 }
5594 }
5595
5596
5597 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5598 if ( !info->tx_active ) {
5599 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5600 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5601 usc_EnableInterrupts(info, TRANSMIT_DATA);
5602 usc_load_txfifo(info);
5603 }
5604 } else {
5605 /* Disable transmit DMA controller while programming. */
5606 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5607
5608 /* Transmit DMA buffer is loaded, so program USC */
5609 /* to send the frame contained in the buffers. */
5610
5611 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5612
5613 /* if operating in Raw sync mode, reset the rcc component
5614 * of the tx dma buffer entry, otherwise, the serial controller
5615 * will send a closing sync char after this count.
5616 */
5617 if ( info->params.mode == MGSL_MODE_RAW )
5618 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5619
5620 /* Program the Transmit Character Length Register (TCLR) */
5621 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5622 usc_OutReg( info, TCLR, (u16)FrameSize );
5623
5624 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5625
5626 /* Program the address of the 1st DMA Buffer Entry in linked list */
5627 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5628 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5629 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5630
5631 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5632 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5633 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5634
5635 if ( info->params.mode == MGSL_MODE_RAW &&
5636 info->num_tx_dma_buffers > 1 ) {
5637 /* When running external sync mode, attempt to 'stream' transmit */
5638 /* by filling tx dma buffers as they become available. To do this */
5639 /* we need to enable Tx DMA EOB Status interrupts : */
5640 /* */
5641 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5642 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5643
5644 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5645 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5646 }
5647
5648 /* Initialize Transmit DMA Channel */
5649 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5650
5651 usc_TCmd( info, TCmd_SendFrame );
5652
5653 mod_timer(&info->tx_timer, jiffies +
5654 msecs_to_jiffies(5000));
5655 }
5656 info->tx_active = true;
5657 }
5658
5659 if ( !info->tx_enabled ) {
5660 info->tx_enabled = true;
5661 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5662 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5663 else
5664 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5665 }
5666
5667 } /* end of usc_start_transmitter() */
5668
5669 /* usc_stop_transmitter()
5670 *
5671 * Stops the transmitter and DMA
5672 *
5673 * Arguments: info pointer to device instance data
5674 * Return Value: None
5675 */
5676 static void usc_stop_transmitter( struct mgsl_struct *info )
5677 {
5678 if (debug_level >= DEBUG_LEVEL_ISR)
5679 printk("%s(%d):usc_stop_transmitter(%s)\n",
5680 __FILE__,__LINE__, info->device_name );
5681
5682 del_timer(&info->tx_timer);
5683
5684 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5685 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5686 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5687
5688 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5689 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5690 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5691
5692 info->tx_enabled = false;
5693 info->tx_active = false;
5694
5695 } /* end of usc_stop_transmitter() */
5696
5697 /* usc_load_txfifo()
5698 *
5699 * Fill the transmit FIFO until the FIFO is full or
5700 * there is no more data to load.
5701 *
5702 * Arguments: info pointer to device extension (instance data)
5703 * Return Value: None
5704 */
5705 static void usc_load_txfifo( struct mgsl_struct *info )
5706 {
5707 int Fifocount;
5708 u8 TwoBytes[2];
5709
5710 if ( !info->xmit_cnt && !info->x_char )
5711 return;
5712
5713 /* Select transmit FIFO status readback in TICR */
5714 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5715
5716 /* load the Transmit FIFO until FIFOs full or all data sent */
5717
5718 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5719 /* there is more space in the transmit FIFO and */
5720 /* there is more data in transmit buffer */
5721
5722 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5723 /* write a 16-bit word from transmit buffer to 16C32 */
5724
5725 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5726 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5727 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5728 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5729
5730 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5731
5732 info->xmit_cnt -= 2;
5733 info->icount.tx += 2;
5734 } else {
5735 /* only 1 byte left to transmit or 1 FIFO slot left */
5736
5737 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5738 info->io_base + CCAR );
5739
5740 if (info->x_char) {
5741 /* transmit pending high priority char */
5742 outw( info->x_char,info->io_base + CCAR );
5743 info->x_char = 0;
5744 } else {
5745 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5746 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5747 info->xmit_cnt--;
5748 }
5749 info->icount.tx++;
5750 }
5751 }
5752
5753 } /* end of usc_load_txfifo() */
5754
5755 /* usc_reset()
5756 *
5757 * Reset the adapter to a known state and prepare it for further use.
5758 *
5759 * Arguments: info pointer to device instance data
5760 * Return Value: None
5761 */
5762 static void usc_reset( struct mgsl_struct *info )
5763 {
5764 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5765 int i;
5766 u32 readval;
5767
5768 /* Set BIT30 of Misc Control Register */
5769 /* (Local Control Register 0x50) to force reset of USC. */
5770
5771 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5772 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5773
5774 info->misc_ctrl_value |= BIT30;
5775 *MiscCtrl = info->misc_ctrl_value;
5776
5777 /*
5778 * Force at least 170ns delay before clearing
5779 * reset bit. Each read from LCR takes at least
5780 * 30ns so 10 times for 300ns to be safe.
5781 */
5782 for(i=0;i<10;i++)
5783 readval = *MiscCtrl;
5784
5785 info->misc_ctrl_value &= ~BIT30;
5786 *MiscCtrl = info->misc_ctrl_value;
5787
5788 *LCR0BRDR = BUS_DESCRIPTOR(
5789 1, // Write Strobe Hold (0-3)
5790 2, // Write Strobe Delay (0-3)
5791 2, // Read Strobe Delay (0-3)
5792 0, // NWDD (Write data-data) (0-3)
5793 4, // NWAD (Write Addr-data) (0-31)
5794 0, // NXDA (Read/Write Data-Addr) (0-3)
5795 0, // NRDD (Read Data-Data) (0-3)
5796 5 // NRAD (Read Addr-Data) (0-31)
5797 );
5798 } else {
5799 /* do HW reset */
5800 outb( 0,info->io_base + 8 );
5801 }
5802
5803 info->mbre_bit = 0;
5804 info->loopback_bits = 0;
5805 info->usc_idle_mode = 0;
5806
5807 /*
5808 * Program the Bus Configuration Register (BCR)
5809 *
5810 * <15> 0 Don't use separate address
5811 * <14..6> 0 reserved
5812 * <5..4> 00 IAckmode = Default, don't care
5813 * <3> 1 Bus Request Totem Pole output
5814 * <2> 1 Use 16 Bit data bus
5815 * <1> 0 IRQ Totem Pole output
5816 * <0> 0 Don't Shift Right Addr
5817 *
5818 * 0000 0000 0000 1100 = 0x000c
5819 *
5820 * By writing to io_base + SDPIN the Wait/Ack pin is
5821 * programmed to work as a Wait pin.
5822 */
5823
5824 outw( 0x000c,info->io_base + SDPIN );
5825
5826
5827 outw( 0,info->io_base );
5828 outw( 0,info->io_base + CCAR );
5829
5830 /* select little endian byte ordering */
5831 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5832
5833
5834 /* Port Control Register (PCR)
5835 *
5836 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5837 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5838 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5839 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5840 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5841 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5842 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5843 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5844 *
5845 * 1111 0000 1111 0101 = 0xf0f5
5846 */
5847
5848 usc_OutReg( info, PCR, 0xf0f5 );
5849
5850
5851 /*
5852 * Input/Output Control Register
5853 *
5854 * <15..14> 00 CTS is active low input
5855 * <13..12> 00 DCD is active low input
5856 * <11..10> 00 TxREQ pin is input (DSR)
5857 * <9..8> 00 RxREQ pin is input (RI)
5858 * <7..6> 00 TxD is output (Transmit Data)
5859 * <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
5860 * <2..0> 100 RxC is Output (drive with BRG0)
5861 *
5862 * 0000 0000 0000 0100 = 0x0004
5863 */
5864
5865 usc_OutReg( info, IOCR, 0x0004 );
5866
5867 } /* end of usc_reset() */
5868
5869 /* usc_set_async_mode()
5870 *
5871 * Program adapter for asynchronous communications.
5872 *
5873 * Arguments: info pointer to device instance data
5874 * Return Value: None
5875 */
5876 static void usc_set_async_mode( struct mgsl_struct *info )
5877 {
5878 u16 RegValue;
5879
5880 /* disable interrupts while programming USC */
5881 usc_DisableMasterIrqBit( info );
5882
5883 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5884 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5885
5886 usc_loopback_frame( info );
5887
5888 /* Channel mode Register (CMR)
5889 *
5890 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5891 * <13..12> 00 00 = 16X Clock
5892 * <11..8> 0000 Transmitter mode = Asynchronous
5893 * <7..6> 00 reserved?
5894 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5895 * <3..0> 0000 Receiver mode = Asynchronous
5896 *
5897 * 0000 0000 0000 0000 = 0x0
5898 */
5899
5900 RegValue = 0;
5901 if ( info->params.stop_bits != 1 )
5902 RegValue |= BIT14;
5903 usc_OutReg( info, CMR, RegValue );
5904
5905
5906 /* Receiver mode Register (RMR)
5907 *
5908 * <15..13> 000 encoding = None
5909 * <12..08> 00000 reserved (Sync Only)
5910 * <7..6> 00 Even parity
5911 * <5> 0 parity disabled
5912 * <4..2> 000 Receive Char Length = 8 bits
5913 * <1..0> 00 Disable Receiver
5914 *
5915 * 0000 0000 0000 0000 = 0x0
5916 */
5917
5918 RegValue = 0;
5919
5920 if ( info->params.data_bits != 8 )
5921 RegValue |= BIT4+BIT3+BIT2;
5922
5923 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5924 RegValue |= BIT5;
5925 if ( info->params.parity != ASYNC_PARITY_ODD )
5926 RegValue |= BIT6;
5927 }
5928
5929 usc_OutReg( info, RMR, RegValue );
5930
5931
5932 /* Set IRQ trigger level */
5933
5934 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5935
5936
5937 /* Receive Interrupt Control Register (RICR)
5938 *
5939 * <15..8> ? RxFIFO IRQ Request Level
5940 *
5941 * Note: For async mode the receive FIFO level must be set
5942 * to 0 to avoid the situation where the FIFO contains fewer bytes
5943 * than the trigger level and no more data is expected.
5944 *
5945 * <7> 0 Exited Hunt IA (Interrupt Arm)
5946 * <6> 0 Idle Received IA
5947 * <5> 0 Break/Abort IA
5948 * <4> 0 Rx Bound IA
5949 * <3> 0 Queued status reflects oldest byte in FIFO
5950 * <2> 0 Abort/PE IA
5951 * <1> 0 Rx Overrun IA
5952 * <0> 0 Select TC0 value for readback
5953 *
5954 * 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5955 */
5956
5957 usc_OutReg( info, RICR, 0x0000 );
5958
5959 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5960 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5961
5962
5963 /* Transmit mode Register (TMR)
5964 *
5965 * <15..13> 000 encoding = None
5966 * <12..08> 00000 reserved (Sync Only)
5967 * <7..6> 00 Transmit parity Even
5968 * <5> 0 Transmit parity Disabled
5969 * <4..2> 000 Tx Char Length = 8 bits
5970 * <1..0> 00 Disable Transmitter
5971 *
5972 * 0000 0000 0000 0000 = 0x0
5973 */
5974
5975 RegValue = 0;
5976
5977 if ( info->params.data_bits != 8 )
5978 RegValue |= BIT4+BIT3+BIT2;
5979
5980 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5981 RegValue |= BIT5;
5982 if ( info->params.parity != ASYNC_PARITY_ODD )
5983 RegValue |= BIT6;
5984 }
5985
5986 usc_OutReg( info, TMR, RegValue );
5987
5988 usc_set_txidle( info );
5989
5990
5991 /* Set IRQ trigger level */
5992
5993 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5994
5995
5996 /* Transmit Interrupt Control Register (TICR)
5997 *
5998 * <15..8> ? Transmit FIFO IRQ Level
5999 * <7> 0 Present IA (Interrupt Arm)
6000 * <6> 1 Idle Sent IA
6001 * <5> 0 Abort Sent IA
6002 * <4> 0 EOF/EOM Sent IA
6003 * <3> 0 CRC Sent IA
6004 * <2> 0 1 = Wait for SW Trigger to Start Frame
6005 * <1> 0 Tx Underrun IA
6006 * <0> 0 TC0 constant on read back
6007 *
6008 * 0000 0000 0100 0000 = 0x0040
6009 */
6010
6011 usc_OutReg( info, TICR, 0x1f40 );
6012
6013 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6014 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6015
6016 usc_enable_async_clock( info, info->params.data_rate );
6017
6018
6019 /* Channel Control/status Register (CCSR)
6020 *
6021 * <15> X RCC FIFO Overflow status (RO)
6022 * <14> X RCC FIFO Not Empty status (RO)
6023 * <13> 0 1 = Clear RCC FIFO (WO)
6024 * <12> X DPLL in Sync status (RO)
6025 * <11> X DPLL 2 Missed Clocks status (RO)
6026 * <10> X DPLL 1 Missed Clock status (RO)
6027 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6028 * <7> X SDLC Loop On status (RO)
6029 * <6> X SDLC Loop Send status (RO)
6030 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6031 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6032 * <1..0> 00 reserved
6033 *
6034 * 0000 0000 0010 0000 = 0x0020
6035 */
6036
6037 usc_OutReg( info, CCSR, 0x0020 );
6038
6039 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6040 RECEIVE_DATA + RECEIVE_STATUS );
6041
6042 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6043 RECEIVE_DATA + RECEIVE_STATUS );
6044
6045 usc_EnableMasterIrqBit( info );
6046
6047 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6048 /* Enable INTEN (Port 6, Bit12) */
6049 /* This connects the IRQ request signal to the ISA bus */
6050 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6051 }
6052
6053 if (info->params.loopback) {
6054 info->loopback_bits = 0x300;
6055 outw(0x0300, info->io_base + CCAR);
6056 }
6057
6058 } /* end of usc_set_async_mode() */
6059
6060 /* usc_loopback_frame()
6061 *
6062 * Loop back a small (2 byte) dummy SDLC frame.
6063 * Interrupts and DMA are NOT used. The purpose of this is to
6064 * clear any 'stale' status info left over from running in async mode.
6065 *
6066 * The 16C32 shows the strange behaviour of marking the 1st
6067 * received SDLC frame with a CRC error even when there is no
6068 * CRC error. To get around this a small dummy frame of 2 bytes
6069 * is looped back when switching from async to sync mode.
6070 *
6071 * Arguments: info pointer to device instance data
6072 * Return Value: None
6073 */
6074 static void usc_loopback_frame( struct mgsl_struct *info )
6075 {
6076 int i;
6077 unsigned long oldmode = info->params.mode;
6078
6079 info->params.mode = MGSL_MODE_HDLC;
6080
6081 usc_DisableMasterIrqBit( info );
6082
6083 usc_set_sdlc_mode( info );
6084 usc_enable_loopback( info, 1 );
6085
6086 /* Write 16-bit Time Constant for BRG0 */
6087 usc_OutReg( info, TC0R, 0 );
6088
6089 /* Channel Control Register (CCR)
6090 *
6091 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6092 * <13> 0 Trigger Tx on SW Command Disabled
6093 * <12> 0 Flag Preamble Disabled
6094 * <11..10> 00 Preamble Length = 8-Bits
6095 * <9..8> 01 Preamble Pattern = flags
6096 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6097 * <5> 0 Trigger Rx on SW Command Disabled
6098 * <4..0> 0 reserved
6099 *
6100 * 0000 0001 0000 0000 = 0x0100
6101 */
6102
6103 usc_OutReg( info, CCR, 0x0100 );
6104
6105 /* SETUP RECEIVER */
6106 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6107 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6108
6109 /* SETUP TRANSMITTER */
6110 /* Program the Transmit Character Length Register (TCLR) */
6111 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6112 usc_OutReg( info, TCLR, 2 );
6113 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6114
6115 /* unlatch Tx status bits, and start transmit channel. */
6116 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6117 outw(0,info->io_base + DATAREG);
6118
6119 /* ENABLE TRANSMITTER */
6120 usc_TCmd( info, TCmd_SendFrame );
6121 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6122
6123 /* WAIT FOR RECEIVE COMPLETE */
6124 for (i=0 ; i<1000 ; i++)
6125 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6126 break;
6127
6128 /* clear Internal Data loopback mode */
6129 usc_enable_loopback(info, 0);
6130
6131 usc_EnableMasterIrqBit(info);
6132
6133 info->params.mode = oldmode;
6134
6135 } /* end of usc_loopback_frame() */
6136
6137 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6138 *
6139 * Arguments: info pointer to adapter info structure
6140 * Return Value: None
6141 */
6142 static void usc_set_sync_mode( struct mgsl_struct *info )
6143 {
6144 usc_loopback_frame( info );
6145 usc_set_sdlc_mode( info );
6146
6147 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6148 /* Enable INTEN (Port 6, Bit12) */
6149 /* This connects the IRQ request signal to the ISA bus */
6150 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6151 }
6152
6153 usc_enable_aux_clock(info, info->params.clock_speed);
6154
6155 if (info->params.loopback)
6156 usc_enable_loopback(info,1);
6157
6158 } /* end of usc_set_sync_mode() */
6159
6160 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6161 *
6162 * Arguments: info pointer to device instance data
6163 * Return Value: None
6164 */
6165 static void usc_set_txidle( struct mgsl_struct *info )
6166 {
6167 u16 usc_idle_mode = IDLEMODE_FLAGS;
6168
6169 /* Map API idle mode to USC register bits */
6170
6171 switch( info->idle_mode ){
6172 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6173 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6174 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6175 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6176 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6177 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6178 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6179 }
6180
6181 info->usc_idle_mode = usc_idle_mode;
6182 //usc_OutReg(info, TCSR, usc_idle_mode);
6183 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6184 info->tcsr_value += usc_idle_mode;
6185 usc_OutReg(info, TCSR, info->tcsr_value);
6186
6187 /*
6188 * if SyncLink WAN adapter is running in external sync mode, the
6189 * transmitter has been set to Monosync in order to try to mimic
6190 * a true raw outbound bit stream. Monosync still sends an open/close
6191 * sync char at the start/end of a frame. Try to match those sync
6192 * patterns to the idle mode set here
6193 */
6194 if ( info->params.mode == MGSL_MODE_RAW ) {
6195 unsigned char syncpat = 0;
6196 switch( info->idle_mode ) {
6197 case HDLC_TXIDLE_FLAGS:
6198 syncpat = 0x7e;
6199 break;
6200 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6201 syncpat = 0x55;
6202 break;
6203 case HDLC_TXIDLE_ZEROS:
6204 case HDLC_TXIDLE_SPACE:
6205 syncpat = 0x00;
6206 break;
6207 case HDLC_TXIDLE_ONES:
6208 case HDLC_TXIDLE_MARK:
6209 syncpat = 0xff;
6210 break;
6211 case HDLC_TXIDLE_ALT_MARK_SPACE:
6212 syncpat = 0xaa;
6213 break;
6214 }
6215
6216 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6217 }
6218
6219 } /* end of usc_set_txidle() */
6220
6221 /* usc_get_serial_signals()
6222 *
6223 * Query the adapter for the state of the V24 status (input) signals.
6224 *
6225 * Arguments: info pointer to device instance data
6226 * Return Value: None
6227 */
6228 static void usc_get_serial_signals( struct mgsl_struct *info )
6229 {
6230 u16 status;
6231
6232 /* clear all serial signals except DTR and RTS */
6233 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6234
6235 /* Read the Misc Interrupt status Register (MISR) to get */
6236 /* the V24 status signals. */
6237
6238 status = usc_InReg( info, MISR );
6239
6240 /* set serial signal bits to reflect MISR */
6241
6242 if ( status & MISCSTATUS_CTS )
6243 info->serial_signals |= SerialSignal_CTS;
6244
6245 if ( status & MISCSTATUS_DCD )
6246 info->serial_signals |= SerialSignal_DCD;
6247
6248 if ( status & MISCSTATUS_RI )
6249 info->serial_signals |= SerialSignal_RI;
6250
6251 if ( status & MISCSTATUS_DSR )
6252 info->serial_signals |= SerialSignal_DSR;
6253
6254 } /* end of usc_get_serial_signals() */
6255
6256 /* usc_set_serial_signals()
6257 *
6258 * Set the state of DTR and RTS based on contents of
6259 * serial_signals member of device extension.
6260 *
6261 * Arguments: info pointer to device instance data
6262 * Return Value: None
6263 */
6264 static void usc_set_serial_signals( struct mgsl_struct *info )
6265 {
6266 u16 Control;
6267 unsigned char V24Out = info->serial_signals;
6268
6269 /* get the current value of the Port Control Register (PCR) */
6270
6271 Control = usc_InReg( info, PCR );
6272
6273 if ( V24Out & SerialSignal_RTS )
6274 Control &= ~(BIT6);
6275 else
6276 Control |= BIT6;
6277
6278 if ( V24Out & SerialSignal_DTR )
6279 Control &= ~(BIT4);
6280 else
6281 Control |= BIT4;
6282
6283 usc_OutReg( info, PCR, Control );
6284
6285 } /* end of usc_set_serial_signals() */
6286
6287 /* usc_enable_async_clock()
6288 *
6289 * Enable the async clock at the specified frequency.
6290 *
6291 * Arguments: info pointer to device instance data
6292 * data_rate data rate of clock in bps
6293 * 0 disables the AUX clock.
6294 * Return Value: None
6295 */
6296 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6297 {
6298 if ( data_rate ) {
6299 /*
6300 * Clock mode Control Register (CMCR)
6301 *
6302 * <15..14> 00 counter 1 Disabled
6303 * <13..12> 00 counter 0 Disabled
6304 * <11..10> 11 BRG1 Input is TxC Pin
6305 * <9..8> 11 BRG0 Input is TxC Pin
6306 * <7..6> 01 DPLL Input is BRG1 Output
6307 * <5..3> 100 TxCLK comes from BRG0
6308 * <2..0> 100 RxCLK comes from BRG0
6309 *
6310 * 0000 1111 0110 0100 = 0x0f64
6311 */
6312
6313 usc_OutReg( info, CMCR, 0x0f64 );
6314
6315
6316 /*
6317 * Write 16-bit Time Constant for BRG0
6318 * Time Constant = (ClkSpeed / data_rate) - 1
6319 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6320 */
6321
6322 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6323 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6324 else
6325 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
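		/*
		 * Worked example (illustrative rate only): for a data_rate of
		 * 9600 bps this loads TC0R with 921600/9600 - 1 = 95 on ISA
		 * and 691200/9600 - 1 = 71 on PCI. The ClkSpeed values above
		 * appear to be the crystal frequencies divided by 16, matching
		 * the 16X async sampling clock selected in the CMR setup.
		 */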
6326
6327
6328 /*
6329 * Hardware Configuration Register (HCR)
6330 * Clear Bit 1, BRG0 mode = Continuous
6331 * Set Bit 0 to enable BRG0.
6332 */
6333
6334 usc_OutReg( info, HCR,
6335 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6336
6337
6338 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6339
6340 usc_OutReg( info, IOCR,
6341 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6342 } else {
6343 /* data rate == 0 so turn off BRG0 */
6344 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6345 }
6346
6347 } /* end of usc_enable_async_clock() */
6348
6349 /*
6350 * Buffer Structures:
6351 *
6352 * Normal memory access uses virtual addresses that can make discontiguous
6353 * physical memory pages appear to be contiguous in the virtual address
6354 * space (the processor's memory mapping handles the conversions).
6355 *
6356 * DMA transfers require physically contiguous memory. This is because
6357 * the DMA system controller and DMA bus masters deal with memory using
6358 * only physical addresses.
6359 *
6360 * This causes a problem under Windows NT when large DMA buffers are
6361 * needed. Fragmentation of the nonpaged pool prevents allocations of
6362 * physically contiguous buffers larger than the PAGE_SIZE.
6363 *
6364 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6365 * allows DMA transfers to physically discontiguous buffers. Information
6366 * about each data transfer buffer is contained in a memory structure
6367 * called a 'buffer entry'. A list of buffer entries is maintained
6368 * to track and control the use of the data transfer buffers.
6369 *
6370 * To support this strategy we will allocate sufficient PAGE_SIZE
6371 * contiguous memory buffers to allow for the total required buffer
6372 * space.
6373 *
6374 * The 16C32 accesses the list of buffer entries using Bus Master
6375 * DMA. Control information is read from the buffer entries by the
6376 * 16C32 to control data transfers. status information is written to
6377 * the buffer entries by the 16C32 to indicate the status of completed
6378 * transfers.
6379 *
6380 * The CPU writes control information to the buffer entries to control
6381 * the 16C32 and reads status information from the buffer entries to
6382 * determine information about received and transmitted frames.
6383 *
6384 * Because the CPU and 16C32 (adapter) both need simultaneous access
6385 * to the buffer entries, the buffer entry memory is allocated with
6386 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6387 * entry list to PAGE_SIZE.
6388 *
6389 * The actual data buffers on the other hand will only be accessed
6390 * by the CPU or the adapter but not by both simultaneously. This allows
6391 * Scatter/Gather packet based DMA procedures for using physically
6392 * discontiguous pages.
6393 */
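
/*
 * For reference, a minimal conceptual sketch of one buffer entry is shown
 * below, excluded from the build. The driver's real DMABUFFERENTRY is
 * declared elsewhere in this file and contains additional fields (physical
 * data address, link to the next entry, padding); the struct name and field
 * layout here are assumptions made only to illustrate the fields used by
 * the routines that follow. The count and status members are assumed to be
 * adjacent 16-bit fields, which is why the reset routines below write both
 * with a single 32-bit store through an unsigned long pointer.
 */
#if 0
struct example_buffer_entry {
	volatile u16 count;	/* data count; cleared by the 16C32 when it starts using the buffer */
	volatile u16 status;	/* set non-zero by the 16C32 on the last buffer of a frame */
	volatile u16 rcc;	/* residual character count, used to derive the frame size */
	u32 phys_entry;		/* physical address of this entry (loaded into NRARL/NRARU or NTARL/NTARU) */
	char *virt_addr;	/* CPU virtual address of the associated data buffer */
};
#endif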
6394
6395 /*
6396 * mgsl_reset_tx_dma_buffers()
6397 *
6398 * Set the count for all transmit buffers to 0 to indicate the
6399 * buffer is available for use and set the current buffer to the
6400 * first buffer. This effectively makes all buffers free and
6401 * discards any data in buffers.
6402 *
6403 * Arguments: info pointer to device instance data
6404 * Return Value: None
6405 */
6406 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6407 {
6408 unsigned int i;
6409
6410 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6411 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6412 }
6413
6414 info->current_tx_buffer = 0;
6415 info->start_tx_dma_buffer = 0;
6416 info->tx_dma_buffers_used = 0;
6417
6418 info->get_tx_holding_index = 0;
6419 info->put_tx_holding_index = 0;
6420 info->tx_holding_count = 0;
6421
6422 } /* end of mgsl_reset_tx_dma_buffers() */
6423
6424 /*
6425 * num_free_tx_dma_buffers()
6426 *
6427 * returns the number of free tx dma buffers available
6428 *
6429 * Arguments: info pointer to device instance data
6430 * Return Value: number of free tx dma buffers
6431 */
6432 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6433 {
6434 return info->tx_buffer_count - info->tx_dma_buffers_used;
6435 }
6436
6437 /*
6438 * mgsl_reset_rx_dma_buffers()
6439 *
6440 * Set the count for all receive buffers to DMABUFFERSIZE
6441 * and set the current buffer to the first buffer. This effectively
6442 * makes all buffers free and discards any data in buffers.
6443 *
6444 * Arguments: info pointer to device instance data
6445 * Return Value: None
6446 */
6447 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6448 {
6449 unsigned int i;
6450
6451 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6452 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6453 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6454 // info->rx_buffer_list[i].status = 0;
6455 }
6456
6457 info->current_rx_buffer = 0;
6458
6459 } /* end of mgsl_reset_rx_dma_buffers() */
6460
6461 /*
6462 * mgsl_free_rx_frame_buffers()
6463 *
6464 * Free the receive buffers used by a received SDLC
6465 * frame such that the buffers can be reused.
6466 *
6467 * Arguments:
6468 *
6469 * info pointer to device instance data
6470 * StartIndex index of 1st receive buffer of frame
6471 * EndIndex index of last receive buffer of frame
6472 *
6473 * Return Value: None
6474 */
6475 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6476 {
6477 bool Done = false;
6478 DMABUFFERENTRY *pBufEntry;
6479 unsigned int Index;
6480
6481 /* Starting with 1st buffer entry of the frame clear the status */
6482 /* field and set the count field to DMA Buffer Size. */
6483
6484 Index = StartIndex;
6485
6486 while( !Done ) {
6487 pBufEntry = &(info->rx_buffer_list[Index]);
6488
6489 if ( Index == EndIndex ) {
6490 /* This is the last buffer of the frame! */
6491 Done = true;
6492 }
6493
6494 /* reset current buffer for reuse */
6495 // pBufEntry->status = 0;
6496 // pBufEntry->count = DMABUFFERSIZE;
6497 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6498
6499 /* advance to next buffer entry in linked list */
6500 Index++;
6501 if ( Index == info->rx_buffer_count )
6502 Index = 0;
6503 }
6504
6505 /* set current buffer to next buffer after last buffer of frame */
6506 info->current_rx_buffer = Index;
6507
6508 } /* end of mgsl_free_rx_frame_buffers() */
6509
6510 /* mgsl_get_rx_frame()
6511 *
6512 * This function attempts to return a received SDLC frame from the
6513 * receive DMA buffers. Only frames received without errors are returned.
6514 *
6515 * Arguments: info pointer to device extension
6516 * Return Value: true if frame returned, otherwise false
6517 */
6518 static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6519 {
6520 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6521 unsigned short status;
6522 DMABUFFERENTRY *pBufEntry;
6523 unsigned int framesize = 0;
6524 bool ReturnCode = false;
6525 unsigned long flags;
6526 struct tty_struct *tty = info->port.tty;
6527 bool return_frame = false;
6528
6529 /*
6530 * current_rx_buffer points to the 1st buffer of the next available
6531 * receive frame. To find the last buffer of the frame look for
6532 * a non-zero status field in the buffer entries. (The status
6533 * field is set by the 16C32 after completing a receive frame.)
6534 */
6535
6536 StartIndex = EndIndex = info->current_rx_buffer;
6537
6538 while( !info->rx_buffer_list[EndIndex].status ) {
6539 /*
6540 * If the count field of the buffer entry is non-zero then
6541 * this buffer has not been used. (The 16C32 clears the count
6542 * field when it starts using the buffer.) If an unused buffer
6543 * is encountered then there are no frames available.
6544 */
6545
6546 if ( info->rx_buffer_list[EndIndex].count )
6547 goto Cleanup;
6548
6549 /* advance to next buffer entry in linked list */
6550 EndIndex++;
6551 if ( EndIndex == info->rx_buffer_count )
6552 EndIndex = 0;
6553
6554 /* if entire list searched then no frame available */
6555 if ( EndIndex == StartIndex ) {
6556 /* If this occurs then something bad happened,
6557 * all buffers have been 'used' but none mark
6558 * the end of a frame. Reset buffers and receiver.
6559 */
6560
6561 if ( info->rx_enabled ){
6562 spin_lock_irqsave(&info->irq_spinlock,flags);
6563 usc_start_receiver(info);
6564 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6565 }
6566 goto Cleanup;
6567 }
6568 }
6569
6570
6571 /* check status of receive frame */
6572
6573 status = info->rx_buffer_list[EndIndex].status;
6574
6575 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6576 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6577 if ( status & RXSTATUS_SHORT_FRAME )
6578 info->icount.rxshort++;
6579 else if ( status & RXSTATUS_ABORT )
6580 info->icount.rxabort++;
6581 else if ( status & RXSTATUS_OVERRUN )
6582 info->icount.rxover++;
6583 else {
6584 info->icount.rxcrc++;
6585 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6586 return_frame = true;
6587 }
6588 framesize = 0;
6589 #if SYNCLINK_GENERIC_HDLC
6590 {
6591 info->netdev->stats.rx_errors++;
6592 info->netdev->stats.rx_frame_errors++;
6593 }
6594 #endif
6595 } else
6596 return_frame = true;
6597
6598 if ( return_frame ) {
6599 /* receive frame has no errors, get frame size.
6600 * The frame size is the starting value of the RCC (which was
6601 * set to 0xffff) minus the ending value of the RCC (decremented
6602 * once for each receive character) minus 2 for the 16-bit CRC.
6603 */
6604
6605 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6606
6607 /* adjust frame size for CRC if any */
6608 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6609 framesize -= 2;
6610 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6611 framesize -= 4;
6612 }
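
	/*
	 * Illustrative numbers only: if the ending RCC in the last buffer
	 * entry reads 0xfef3, then RCLRVALUE - 0xfef3 = 0xffff - 0xfef3 =
	 * 268 characters were received; with HDLC_CRC_16_CCITT the frame
	 * size returned to the caller is 268 - 2 = 266 data bytes.
	 */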
6613
6614 if ( debug_level >= DEBUG_LEVEL_BH )
6615 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6616 __FILE__,__LINE__,info->device_name,status,framesize);
6617
6618 if ( debug_level >= DEBUG_LEVEL_DATA )
6619 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6620 min_t(int, framesize, DMABUFFERSIZE),0);
6621
6622 if (framesize) {
6623 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6624 ((framesize+1) > info->max_frame_size) ) ||
6625 (framesize > info->max_frame_size) )
6626 info->icount.rxlong++;
6627 else {
6628 /* copy dma buffer(s) to contiguous intermediate buffer */
6629 int copy_count = framesize;
6630 int index = StartIndex;
6631 unsigned char *ptmp = info->intermediate_rxbuffer;
6632
6633 if ( !(status & RXSTATUS_CRC_ERROR))
6634 info->icount.rxok++;
6635
6636 while(copy_count) {
6637 int partial_count;
6638 if ( copy_count > DMABUFFERSIZE )
6639 partial_count = DMABUFFERSIZE;
6640 else
6641 partial_count = copy_count;
6642
6643 pBufEntry = &(info->rx_buffer_list[index]);
6644 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6645 ptmp += partial_count;
6646 copy_count -= partial_count;
6647
6648 if ( ++index == info->rx_buffer_count )
6649 index = 0;
6650 }
6651
6652 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6653 ++framesize;
6654 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6655 RX_CRC_ERROR :
6656 RX_OK);
6657
6658 if ( debug_level >= DEBUG_LEVEL_DATA )
6659 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6660 __FILE__,__LINE__,info->device_name,
6661 *ptmp);
6662 }
6663
6664 #if SYNCLINK_GENERIC_HDLC
6665 if (info->netcount)
6666 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6667 else
6668 #endif
6669 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6670 }
6671 }
6672 /* Free the buffers used by this frame. */
6673 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6674
6675 ReturnCode = true;
6676
6677 Cleanup:
6678
6679 if ( info->rx_enabled && info->rx_overflow ) {
6680 /* The receiver needs to be restarted because of
6681 * a receive overflow (buffer or FIFO). If the
6682 * receive buffers are now empty, then restart receiver.
6683 */
6684
6685 if ( !info->rx_buffer_list[EndIndex].status &&
6686 info->rx_buffer_list[EndIndex].count ) {
6687 spin_lock_irqsave(&info->irq_spinlock,flags);
6688 usc_start_receiver(info);
6689 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6690 }
6691 }
6692
6693 return ReturnCode;
6694
6695 } /* end of mgsl_get_rx_frame() */
6696
6697 /* mgsl_get_raw_rx_frame()
6698 *
6699 * This function attempts to return a received frame from the
6700 * receive DMA buffers when running in external loop mode. In this mode,
6701 * we will return at most one DMABUFFERSIZE frame to the application.
6702 * The USC receiver triggers on DCD going active to start a new
6703 * frame and on DCD going inactive to terminate the frame (similar to
6704 * processing a closing flag character).
6705 *
6706 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6707 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6708 * status field and the RCC field will indicate the length of the
6709 * entire received frame. We take this RCC field and get the modulus
6710 * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6711 * last Rx DMA buffer and return that last portion of the frame.
6712 *
6713 * Arguments: info pointer to device extension
6714 * Return Value: true if frame returned, otherwise false
6715 */
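
/*
 * Example with made-up numbers, assuming DMABUFFERSIZE is the 4K (4096 byte)
 * buffer size referenced in the comments below: if the RCC indicates a
 * 10000 byte frame, the first two buffers are each returned as full 4096
 * byte chunks and the final chunk is 10000 % 4096 = 1808 bytes.
 */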
6716 static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6717 {
6718 unsigned int CurrentIndex, NextIndex;
6719 unsigned short status;
6720 DMABUFFERENTRY *pBufEntry;
6721 unsigned int framesize = 0;
6722 bool ReturnCode = false;
6723 unsigned long flags;
6724 struct tty_struct *tty = info->port.tty;
6725
6726 /*
6727 * current_rx_buffer points to the 1st buffer of the next available
6728 * receive frame. The status field is set by the 16C32 after
6729 * completing a receive frame. If the status field of this buffer
6730 * is zero, either the USC is still filling this buffer or this
6731 * is one of a series of buffers making up a received frame.
6732 *
6733 * If the count field of this buffer is zero, the USC is either
6734 * using this buffer or has used this buffer. Look at the count
6735 * field of the next buffer. If that next buffer's count is
6736 * non-zero, the USC is still actively using the current buffer.
6737 * Otherwise, if the next buffer's count field is zero, the
6738 * current buffer is complete and the USC is using the next
6739 * buffer.
6740 */
6741 CurrentIndex = NextIndex = info->current_rx_buffer;
6742 ++NextIndex;
6743 if ( NextIndex == info->rx_buffer_count )
6744 NextIndex = 0;
6745
6746 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6747 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6748 info->rx_buffer_list[NextIndex].count == 0)) {
6749 /*
6750 * Either the status field of this dma buffer is non-zero
6751 * (indicating the last buffer of a receive frame) or the next
6752 * buffer is marked as in use -- implying this buffer is complete
6753 * and is an intermediate buffer for this received frame.
6754 */
6755
6756 status = info->rx_buffer_list[CurrentIndex].status;
6757
6758 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6759 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6760 if ( status & RXSTATUS_SHORT_FRAME )
6761 info->icount.rxshort++;
6762 else if ( status & RXSTATUS_ABORT )
6763 info->icount.rxabort++;
6764 else if ( status & RXSTATUS_OVERRUN )
6765 info->icount.rxover++;
6766 else
6767 info->icount.rxcrc++;
6768 framesize = 0;
6769 } else {
6770 /*
6771 * A receive frame is available, get frame size and status.
6772 *
6773 * The frame size is the starting value of the RCC (which was
6774 * set to 0xffff) minus the ending value of the RCC (decremented
6775 * once for each receive character) minus 2 or 4 for the 16-bit
6776 * or 32-bit CRC.
6777 *
6778 * If the status field is zero, this is an intermediate buffer.
6779 * Its size is 4K.
6780 *
6781 * If the DMA Buffer Entry's Status field is non-zero, the
6782 * receive operation completed normally (ie: DCD dropped). The
6783 * RCC field is valid and holds the received frame size.
6784 * It is possible that the RCC field will be zero on a DMA buffer
6785 * entry with a non-zero status. This can occur if the total
6786 * frame size (number of bytes from the time DCD goes active
6787 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6788 * case the 16C32 has underrun on the RCC count and appears to
6789 * stop updating this counter to let us know the actual received
6790 * frame size. If this happens (non-zero status and zero RCC),
6791 * simply return the entire RxDMA Buffer
6792 */
6793 if ( status ) {
6794 /*
6795 * In the event that the final RxDMA Buffer is
6796 * terminated with a non-zero status and the RCC
6797 * field is zero, we interpret this as the RCC
6798 * having underflowed (received frame > 65535 bytes).
6799 *
6800 * Signal the event to the user by passing back
6801 * a status of RxStatus_CrcError returning the full
6802 * buffer and let the app figure out what data is
6803 * actually valid
6804 */
6805 if ( info->rx_buffer_list[CurrentIndex].rcc )
6806 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6807 else
6808 framesize = DMABUFFERSIZE;
6809 }
6810 else
6811 framesize = DMABUFFERSIZE;
6812 }
6813
6814 if ( framesize > DMABUFFERSIZE ) {
6815 /*
6816 * if running in raw sync mode, ISR handler for
6817 * End Of Buffer events terminates all buffers at 4K.
6818 * If this frame size is said to be >4K, get the
6819 * actual number of bytes of the frame in this buffer.
6820 */
6821 framesize = framesize % DMABUFFERSIZE;
6822 }
6823
6824
6825 if ( debug_level >= DEBUG_LEVEL_BH )
6826 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6827 __FILE__,__LINE__,info->device_name,status,framesize);
6828
6829 if ( debug_level >= DEBUG_LEVEL_DATA )
6830 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6831 min_t(int, framesize, DMABUFFERSIZE),0);
6832
6833 if (framesize) {
6834 /* copy dma buffer(s) to contiguous intermediate buffer */
6835 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6836
6837 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6838 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6839 info->icount.rxok++;
6840
6841 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6842 }
6843
6844 /* Free the buffers used by this frame. */
6845 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6846
6847 ReturnCode = true;
6848 }
6849
6850
6851 if ( info->rx_enabled && info->rx_overflow ) {
6852 /* The receiver needs to be restarted because of
6853 * a receive overflow (buffer or FIFO). If the
6854 * receive buffers are now empty, then restart receiver.
6855 */
6856
6857 if ( !info->rx_buffer_list[CurrentIndex].status &&
6858 info->rx_buffer_list[CurrentIndex].count ) {
6859 spin_lock_irqsave(&info->irq_spinlock,flags);
6860 usc_start_receiver(info);
6861 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6862 }
6863 }
6864
6865 return ReturnCode;
6866
6867 } /* end of mgsl_get_raw_rx_frame() */
6868
6869 /* mgsl_load_tx_dma_buffer()
6870 *
6871 * Load the transmit DMA buffer with the specified data.
6872 *
6873 * Arguments:
6874 *
6875 * info pointer to device extension
6876 * Buffer pointer to buffer containing frame to load
6877 * BufferSize size in bytes of frame in Buffer
6878 *
6879 * Return Value: None
6880 */
6881 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6882 const char *Buffer, unsigned int BufferSize)
6883 {
6884 unsigned short Copycount;
6885 unsigned int i = 0;
6886 DMABUFFERENTRY *pBufEntry;
6887
6888 if ( debug_level >= DEBUG_LEVEL_DATA )
6889 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6890
6891 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6892 /* set CMR:13 to start transmit when
6893 * next GoAhead (abort) is received
6894 */
6895 info->cmr_value |= BIT13;
6896 }
6897
6898 /* begin loading the frame in the next available tx dma
6899 	 * buffer, remember its starting location for setting
6900 * up tx dma operation
6901 */
6902 i = info->current_tx_buffer;
6903 info->start_tx_dma_buffer = i;
6904
6905 /* Setup the status and RCC (Frame Size) fields of the 1st */
6906 /* buffer entry in the transmit DMA buffer list. */
6907
6908 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6909 info->tx_buffer_list[i].rcc = BufferSize;
6910 info->tx_buffer_list[i].count = BufferSize;
6911
6912 /* Copy frame data from 1st source buffer to the DMA buffers. */
6913 /* The frame data may span multiple DMA buffers. */
6914
6915 while( BufferSize ){
6916 /* Get a pointer to next DMA buffer entry. */
6917 pBufEntry = &info->tx_buffer_list[i++];
6918
6919 if ( i == info->tx_buffer_count )
6920 i=0;
6921
6922 /* Calculate the number of bytes that can be copied from */
6923 /* the source buffer to this DMA buffer. */
6924 if ( BufferSize > DMABUFFERSIZE )
6925 Copycount = DMABUFFERSIZE;
6926 else
6927 Copycount = BufferSize;
6928
6929 /* Actually copy data from source buffer to DMA buffer. */
6930 /* Also set the data count for this individual DMA buffer. */
6931 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6932 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6933 else
6934 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6935
6936 pBufEntry->count = Copycount;
6937
6938 /* Advance source pointer and reduce remaining data count. */
6939 Buffer += Copycount;
6940 BufferSize -= Copycount;
6941
6942 ++info->tx_dma_buffers_used;
6943 }
6944
6945 /* remember next available tx dma buffer */
6946 info->current_tx_buffer = i;
6947
6948 } /* end of mgsl_load_tx_dma_buffer() */
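
/* Illustrative sketch (editorial, not part of the driver): with
 * DMABUFFERSIZE of 4K, loading a 6000 byte frame consumes two list
 * entries; the loop above copies 4096 bytes into the first buffer
 * and the remaining 1904 bytes into the next, wrapping to entry 0
 * when tx_buffer_count is reached. The per-buffer count follows
 * the same arithmetic as Copycount:
 *
 *	unsigned int size = 6000, copied;
 *	while (size) {
 *		copied = (size > 4096) ? 4096 : size;	// per-buffer count
 *		size -= copied;				// 6000 -> 1904 -> 0
 *	}
 */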
6949
6950 /*
6951 * mgsl_register_test()
6952 *
6953 * Performs a register test of the 16C32.
6954 *
6955 * Arguments: info pointer to device instance data
6956 * Return Value: true if test passed, otherwise false
6957 */
6958 static bool mgsl_register_test( struct mgsl_struct *info )
6959 {
6960 static unsigned short BitPatterns[] =
6961 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6962 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6963 unsigned int i;
6964 bool rc = true;
6965 unsigned long flags;
6966
6967 spin_lock_irqsave(&info->irq_spinlock,flags);
6968 usc_reset(info);
6969
6970 /* Verify the reset state of some registers. */
6971
6972 if ( (usc_InReg( info, SICR ) != 0) ||
6973 (usc_InReg( info, IVR ) != 0) ||
6974 (usc_InDmaReg( info, DIVR ) != 0) ){
6975 rc = false;
6976 }
6977
6978 if ( rc ){
6979 /* Write bit patterns to various registers but do it out of */
6980 /* sync, then read back and verify values. */
6981
6982 for ( i = 0 ; i < Patterncount ; i++ ) {
6983 usc_OutReg( info, TC0R, BitPatterns[i] );
6984 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6985 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6986 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6987 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6988 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6989
6990 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6991 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6992 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6993 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6994 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6995 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6996 rc = false;
6997 break;
6998 }
6999 }
7000 }
7001
7002 usc_reset(info);
7003 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7004
7005 return rc;
7006
7007 } /* end of mgsl_register_test() */
7008
7009 /* mgsl_irq_test() Perform interrupt test of the 16C32.
7010 *
7011 * Arguments: info pointer to device instance data
7012 * Return Value: true if test passed, otherwise false
7013 */
7014 static bool mgsl_irq_test( struct mgsl_struct *info )
7015 {
7016 unsigned long EndTime;
7017 unsigned long flags;
7018
7019 spin_lock_irqsave(&info->irq_spinlock,flags);
7020 usc_reset(info);
7021
7022 /*
7023 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7024 * The ISR sets irq_occurred to true.
7025 */
7026
7027 info->irq_occurred = false;
7028
7029 	/* Enable INTEN gate for ISA adapter (Port 6, Bit12). */
7030 	/* This connects the IRQ request signal to the ISA bus */
7031 	/* on the ISA adapter. It has no effect for the */
7032 	/* PCI adapter. */
7033 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7034
7035 usc_EnableMasterIrqBit(info);
7036 usc_EnableInterrupts(info, IO_PIN);
7037 usc_ClearIrqPendingBits(info, IO_PIN);
7038
7039 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7040 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7041
7042 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7043
7044 EndTime=100;
7045 while( EndTime-- && !info->irq_occurred ) {
7046 msleep_interruptible(10);
7047 }
7048
7049 spin_lock_irqsave(&info->irq_spinlock,flags);
7050 usc_reset(info);
7051 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7052
7053 return info->irq_occurred;
7054
7055 } /* end of mgsl_irq_test() */
7056
7057 /* mgsl_dma_test()
7058 *
7059 * Perform a DMA test of the 16C32. A small frame is
7060 * transmitted via DMA from a transmit buffer to a receive buffer
7061 * using single buffer DMA mode.
7062 *
7063 * Arguments: info pointer to device instance data
7064 * Return Value: true if test passed, otherwise false
7065 */
7066 static bool mgsl_dma_test( struct mgsl_struct *info )
7067 {
7068 unsigned short FifoLevel;
7069 unsigned long phys_addr;
7070 unsigned int FrameSize;
7071 unsigned int i;
7072 char *TmpPtr;
7073 bool rc = true;
7074 unsigned short status=0;
7075 unsigned long EndTime;
7076 unsigned long flags;
7077 MGSL_PARAMS tmp_params;
7078
7079 /* save current port options */
7080 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7081 /* load default port options */
7082 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7083
7084 #define TESTFRAMESIZE 40
7085
7086 spin_lock_irqsave(&info->irq_spinlock,flags);
7087
7088 /* setup 16C32 for SDLC DMA transfer mode */
7089
7090 usc_reset(info);
7091 usc_set_sdlc_mode(info);
7092 usc_enable_loopback(info,1);
7093
7094 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7095 * field of the buffer entry after fetching buffer address. This
7096 * way we can detect a DMA failure for a DMA read (which should be
7097 * non-destructive to system memory) before we try and write to
7098 * memory (where a failure could corrupt system memory).
7099 */
7100
7101 /* Receive DMA mode Register (RDMR)
7102 *
7103 * <15..14> 11 DMA mode = Linked List Buffer mode
7104 * <13> 1 RSBinA/L = store Rx status Block in List entry
7105 * <12> 0 1 = Clear count of List Entry after fetching
7106 * <11..10> 00 Address mode = Increment
7107 * <9> 1 Terminate Buffer on RxBound
7108 * <8> 0 Bus Width = 16bits
7109 * <7..0> ? status Bits (write as 0s)
7110 *
7111 * 1110 0010 0000 0000 = 0xe200
7112 */
7113
7114 usc_OutDmaReg( info, RDMR, 0xe200 );
7115
7116 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7117
7118
7119 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7120
7121 FrameSize = TESTFRAMESIZE;
7122
7123 /* setup 1st transmit buffer entry: */
7124 /* with frame size and transmit control word */
7125
7126 info->tx_buffer_list[0].count = FrameSize;
7127 info->tx_buffer_list[0].rcc = FrameSize;
7128 info->tx_buffer_list[0].status = 0x4000;
7129
7130 /* build a transmit frame in 1st transmit DMA buffer */
7131
7132 TmpPtr = info->tx_buffer_list[0].virt_addr;
7133 for (i = 0; i < FrameSize; i++ )
7134 *TmpPtr++ = i;
7135
7136 /* setup 1st receive buffer entry: */
7137 /* clear status, set max receive buffer size */
7138
7139 info->rx_buffer_list[0].status = 0;
7140 info->rx_buffer_list[0].count = FrameSize + 4;
7141
7142 /* zero out the 1st receive buffer */
7143
7144 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7145
7146 /* Set count field of next buffer entries to prevent */
7147 /* 16C32 from using buffers after the 1st one. */
7148
7149 info->tx_buffer_list[1].count = 0;
7150 info->rx_buffer_list[1].count = 0;
7151
7152
7153 /***************************/
7154 /* Program 16C32 receiver. */
7155 /***************************/
7156
7157 spin_lock_irqsave(&info->irq_spinlock,flags);
7158
7159 /* setup DMA transfers */
7160 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7161
7162 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7163 phys_addr = info->rx_buffer_list[0].phys_entry;
7164 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7165 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7166
7167 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7168 usc_InDmaReg( info, RDMR );
7169 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7170
7171 /* Enable Receiver (RMR <1..0> = 10) */
7172 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7173
7174 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7175
7176
7177 /*************************************************************/
7178 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7179 /*************************************************************/
7180
7181 /* Wait 100ms for interrupt. */
7182 EndTime = jiffies + msecs_to_jiffies(100);
7183
7184 for(;;) {
7185 if (time_after(jiffies, EndTime)) {
7186 rc = false;
7187 break;
7188 }
7189
7190 spin_lock_irqsave(&info->irq_spinlock,flags);
7191 status = usc_InDmaReg( info, RDMR );
7192 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7193
7194 if ( !(status & BIT4) && (status & BIT5) ) {
7195 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7196 /* BUSY (BIT 5) is active (channel still active). */
7197 /* This means the buffer entry read has completed. */
7198 break;
7199 }
7200 }
7201
7202
7203 /******************************/
7204 /* Program 16C32 transmitter. */
7205 /******************************/
7206
7207 spin_lock_irqsave(&info->irq_spinlock,flags);
7208
7209 /* Program the Transmit Character Length Register (TCLR) */
7210 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7211
7212 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7213 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7214
7215 /* Program the address of the 1st DMA Buffer Entry in linked list */
7216
7217 phys_addr = info->tx_buffer_list[0].phys_entry;
7218 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7219 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7220
7221 /* unlatch Tx status bits, and start transmit channel. */
7222
7223 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7224 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7225
7226 /* wait for DMA controller to fill transmit FIFO */
7227
7228 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7229
7230 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7231
7232
7233 /**********************************/
7234 /* WAIT FOR TRANSMIT FIFO TO FILL */
7235 /**********************************/
7236
7237 /* Wait 100ms */
7238 EndTime = jiffies + msecs_to_jiffies(100);
7239
7240 for(;;) {
7241 if (time_after(jiffies, EndTime)) {
7242 rc = false;
7243 break;
7244 }
7245
7246 spin_lock_irqsave(&info->irq_spinlock,flags);
7247 FifoLevel = usc_InReg(info, TICR) >> 8;
7248 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7249
7250 if ( FifoLevel < 16 )
7251 break;
7252 else
7253 if ( FrameSize < 32 ) {
7254 /* This frame is smaller than the entire transmit FIFO */
7255 /* so wait for the entire frame to be loaded. */
7256 if ( FifoLevel <= (32 - FrameSize) )
7257 break;
7258 }
7259 }
7260
7261
7262 if ( rc )
7263 {
7264 /* Enable 16C32 transmitter. */
7265
7266 spin_lock_irqsave(&info->irq_spinlock,flags);
7267
7268 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7269 usc_TCmd( info, TCmd_SendFrame );
7270 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7271
7272 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7273
7274
7275 /******************************/
7276 /* WAIT FOR TRANSMIT COMPLETE */
7277 /******************************/
7278
7279 /* Wait 100ms */
7280 EndTime = jiffies + msecs_to_jiffies(100);
7281
7282 /* While timer not expired wait for transmit complete */
7283
7284 spin_lock_irqsave(&info->irq_spinlock,flags);
7285 status = usc_InReg( info, TCSR );
7286 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7287
7288 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7289 if (time_after(jiffies, EndTime)) {
7290 rc = false;
7291 break;
7292 }
7293
7294 spin_lock_irqsave(&info->irq_spinlock,flags);
7295 status = usc_InReg( info, TCSR );
7296 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7297 }
7298 }
7299
7300
7301 if ( rc ){
7302 /* CHECK FOR TRANSMIT ERRORS */
7303 if ( status & (BIT5 + BIT1) )
7304 rc = false;
7305 }
7306
7307 if ( rc ) {
7308 /* WAIT FOR RECEIVE COMPLETE */
7309
7310 /* Wait 100ms */
7311 EndTime = jiffies + msecs_to_jiffies(100);
7312
7313 /* Wait for 16C32 to write receive status to buffer entry. */
7314 status=info->rx_buffer_list[0].status;
7315 while ( status == 0 ) {
7316 if (time_after(jiffies, EndTime)) {
7317 rc = false;
7318 break;
7319 }
7320 status=info->rx_buffer_list[0].status;
7321 }
7322 }
7323
7324
7325 if ( rc ) {
7326 /* CHECK FOR RECEIVE ERRORS */
7327 status = info->rx_buffer_list[0].status;
7328
7329 if ( status & (BIT8 + BIT3 + BIT1) ) {
7330 /* receive error has occurred */
7331 rc = false;
7332 } else {
7333 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7334 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7335 rc = false;
7336 }
7337 }
7338 }
7339
7340 spin_lock_irqsave(&info->irq_spinlock,flags);
7341 usc_reset( info );
7342 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7343
7344 /* restore current port options */
7345 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7346
7347 return rc;
7348
7349 } /* end of mgsl_dma_test() */
7350
7351 /* mgsl_adapter_test()
7352 *
7353 * Perform the register, IRQ, and DMA tests for the 16C32.
7354 *
7355 * Arguments: info pointer to device instance data
7356 * Return Value: 0 if success, otherwise -ENODEV
7357 */
7358 static int mgsl_adapter_test( struct mgsl_struct *info )
7359 {
7360 if ( debug_level >= DEBUG_LEVEL_INFO )
7361 printk( "%s(%d):Testing device %s\n",
7362 __FILE__,__LINE__,info->device_name );
7363
7364 if ( !mgsl_register_test( info ) ) {
7365 info->init_error = DiagStatus_AddressFailure;
7366 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7367 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7368 return -ENODEV;
7369 }
7370
7371 if ( !mgsl_irq_test( info ) ) {
7372 info->init_error = DiagStatus_IrqFailure;
7373 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7374 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7375 return -ENODEV;
7376 }
7377
7378 if ( !mgsl_dma_test( info ) ) {
7379 info->init_error = DiagStatus_DmaFailure;
7380 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7381 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7382 return -ENODEV;
7383 }
7384
7385 if ( debug_level >= DEBUG_LEVEL_INFO )
7386 printk( "%s(%d):device %s passed diagnostics\n",
7387 __FILE__,__LINE__,info->device_name );
7388
7389 return 0;
7390
7391 } /* end of mgsl_adapter_test() */
7392
7393 /* mgsl_memory_test()
7394 *
7395 * Test the shared memory on a PCI adapter.
7396 *
7397 * Arguments: info pointer to device instance data
7398 * Return Value: true if test passed, otherwise false
7399 */
7400 static bool mgsl_memory_test( struct mgsl_struct *info )
7401 {
7402 static unsigned long BitPatterns[] =
7403 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7404 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7405 unsigned long i;
7406 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7407 unsigned long * TestAddr;
7408
7409 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7410 return true;
7411
7412 TestAddr = (unsigned long *)info->memory_base;
7413
7414 /* Test data lines with test pattern at one location. */
7415
7416 for ( i = 0 ; i < Patterncount ; i++ ) {
7417 *TestAddr = BitPatterns[i];
7418 if ( *TestAddr != BitPatterns[i] )
7419 return false;
7420 }
7421
7422 /* Test address lines with incrementing pattern over */
7423 /* entire address range. */
7424
7425 for ( i = 0 ; i < TestLimit ; i++ ) {
7426 *TestAddr = i * 4;
7427 TestAddr++;
7428 }
7429
7430 TestAddr = (unsigned long *)info->memory_base;
7431
7432 for ( i = 0 ; i < TestLimit ; i++ ) {
7433 if ( *TestAddr != i * 4 )
7434 return false;
7435 TestAddr++;
7436 }
7437
7438 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7439
7440 return true;
7441
7442 } /* End Of mgsl_memory_test() */
7443
7444
7445 /* mgsl_load_pci_memory()
7446 *
7447 * Load a large block of data into the PCI shared memory.
7448 * Use this instead of memcpy() or memmove() to move data
7449 * into the PCI shared memory.
7450 *
7451 * Notes:
7452 *
7453 * This function prevents the PCI9050 interface chip from hogging
7454 * the adapter local bus, which can starve the 16C32 by preventing
7455 * 16C32 bus master cycles.
7456 *
7457 * The PCI9050 documentation says that the 9050 will always release
7458 * control of the local bus after completing the current read
7459 * or write operation.
7460 *
7461 * It appears that as long as the PCI9050 write FIFO is full, the
7462 * PCI9050 treats all of the writes as a single burst transaction
7463 * and will not release the bus. This causes DMA latency problems
7464 * at high speeds when copying large data blocks to the shared
7465 * memory.
7466 *
7467  * This function, in effect, breaks a large shared memory write
7468  * into multiple transactions by interleaving a shared memory read
7469  * which will flush the write FIFO and 'complete' the write
7470  * transaction. This allows any pending DMA request to gain control
7471  * of the local bus in a timely fashion.
7472 *
7473 * Arguments:
7474 *
7475 * TargetPtr pointer to target address in PCI shared memory
7476 * SourcePtr pointer to source buffer for data
7477 * count count in bytes of data to copy
7478 *
7479 * Return Value: None
7480 */
7481 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7482 unsigned short count )
7483 {
7484 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7485 #define PCI_LOAD_INTERVAL 64
7486
7487 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7488 unsigned short Index;
7489 unsigned long Dummy;
7490
7491 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7492 {
7493 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7494 Dummy = *((volatile unsigned long *)TargetPtr);
7495 TargetPtr += PCI_LOAD_INTERVAL;
7496 SourcePtr += PCI_LOAD_INTERVAL;
7497 }
7498
7499 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7500
7501 } /* End Of mgsl_load_pci_memory() */
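
/* Usage sketch (editorial note): the transmit path above calls this
 * helper instead of a plain memcpy() when the adapter is PCI, e.g.
 *
 *	if ( info->bus_type == MGSL_BUS_TYPE_PCI )
 *		mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer, Copycount);
 *	else
 *		memcpy(pBufEntry->virt_addr, Buffer, Copycount);
 *
 * so each 64 byte interval (PCI_LOAD_INTERVAL) is followed by a read
 * back that flushes the PCI9050 write FIFO before the next burst.
 */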
7502
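/* mgsl_trace_block()
 *
 * Dump a block of transmit or receive data to the kernel log as a
 * hex/ASCII listing, 16 bytes per line, for debugging. A line of
 * output looks roughly like (editorial example):
 *
 *	<devname> rx data:
 *	7E FF 03 C0 21 ...        ~...!.
 *
 * Arguments:	info	pointer to device instance data
 *		data	pointer to data block
 *		count	count of bytes in data block
 *		xmit	non-zero for transmit data, zero for receive data
 * Return Value:	None
 */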
7503 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7504 {
7505 int i;
7506 int linecount;
7507 if (xmit)
7508 printk("%s tx data:\n",info->device_name);
7509 else
7510 printk("%s rx data:\n",info->device_name);
7511
7512 while(count) {
7513 if (count > 16)
7514 linecount = 16;
7515 else
7516 linecount = count;
7517
7518 for(i=0;i<linecount;i++)
7519 printk("%02X ",(unsigned char)data[i]);
7520 for(;i<17;i++)
7521 printk(" ");
7522 for(i=0;i<linecount;i++) {
7523 if (data[i]>=040 && data[i]<=0176)
7524 printk("%c",data[i]);
7525 else
7526 printk(".");
7527 }
7528 printk("\n");
7529
7530 data += linecount;
7531 count -= linecount;
7532 }
7533 } /* end of mgsl_trace_block() */
7534
7535 /* mgsl_tx_timeout()
7536 *
7537 * called when HDLC frame times out
7538 * update stats and do tx completion processing
7539 *
7540 * Arguments: context pointer to device instance data
7541 * Return Value: None
7542 */
7543 static void mgsl_tx_timeout(unsigned long context)
7544 {
7545 struct mgsl_struct *info = (struct mgsl_struct*)context;
7546 unsigned long flags;
7547
7548 if ( debug_level >= DEBUG_LEVEL_INFO )
7549 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7550 __FILE__,__LINE__,info->device_name);
7551 if(info->tx_active &&
7552 (info->params.mode == MGSL_MODE_HDLC ||
7553 info->params.mode == MGSL_MODE_RAW) ) {
7554 info->icount.txtimeout++;
7555 }
7556 spin_lock_irqsave(&info->irq_spinlock,flags);
7557 info->tx_active = false;
7558 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7559
7560 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7561 usc_loopmode_cancel_transmit( info );
7562
7563 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7564
7565 #if SYNCLINK_GENERIC_HDLC
7566 if (info->netcount)
7567 hdlcdev_tx_done(info);
7568 else
7569 #endif
7570 mgsl_bh_transmit(info);
7571
7572 } /* end of mgsl_tx_timeout() */
7573
7574 /* signal that there are no more frames to send, so that
7575  * the line is 'released' by echoing RxD to TxD when the current
7576  * transmission is complete (or immediately if no tx is in progress).
7577 */
7578 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7579 {
7580 unsigned long flags;
7581
7582 spin_lock_irqsave(&info->irq_spinlock,flags);
7583 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7584 if (info->tx_active)
7585 info->loopmode_send_done_requested = true;
7586 else
7587 usc_loopmode_send_done(info);
7588 }
7589 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7590
7591 return 0;
7592 }
7593
7594 /* release the line by echoing RxD to TxD
7595 * upon completion of a transmit frame
7596 */
7597 static void usc_loopmode_send_done( struct mgsl_struct * info )
7598 {
7599 info->loopmode_send_done_requested = false;
7600 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7601 info->cmr_value &= ~BIT13;
7602 usc_OutReg(info, CMR, info->cmr_value);
7603 }
7604
7605 /* abort a transmit in progress while in HDLC LoopMode
7606 */
7607 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7608 {
7609 /* reset tx dma channel and purge TxFifo */
7610 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7611 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7612 usc_loopmode_send_done( info );
7613 }
7614
7615 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7616 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7617  * we must clear CMR:13 to begin repeating RxData to TxData
7618 */
7619 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7620 {
7621 info->loopmode_insert_requested = true;
7622
7623 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7624 	 * begin repeating RxData on TxData (complete insertion)
7625 */
7626 usc_OutReg( info, RICR,
7627 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7628
7629 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7630 info->cmr_value |= BIT13;
7631 usc_OutReg(info, CMR, info->cmr_value);
7632 }
7633
7634 /* return 1 if station is inserted into the loop, otherwise 0
7635 */
7636 static int usc_loopmode_active( struct mgsl_struct * info)
7637 {
7638 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7639 }
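
/* Illustrative sequence (editorial sketch only; "frame" and "len" are
 * hypothetical names, and the exact call order is driven from the
 * transmit and ioctl paths elsewhere in this driver):
 *
 *	usc_loopmode_insert_request(info);	// arm CMR:13, wait for GoAhead
 *	...					// ISR completes insertion on RxAbort
 *	if (usc_loopmode_active(info)) {
 *		mgsl_load_tx_dma_buffer(info, frame, len);	// sets CMR:13 again
 *		usc_start_transmitter(info);
 *	}
 *	mgsl_loopmode_send_done(info);		// release line, echo RxD to TxD
 */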
7640
7641 #if SYNCLINK_GENERIC_HDLC
7642
7643 /**
7644 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7645 * set encoding and frame check sequence (FCS) options
7646 *
7647 * dev pointer to network device structure
7648 * encoding serial encoding setting
7649 * parity FCS setting
7650 *
7651 * returns 0 if success, otherwise error code
7652 */
7653 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7654 unsigned short parity)
7655 {
7656 struct mgsl_struct *info = dev_to_port(dev);
7657 unsigned char new_encoding;
7658 unsigned short new_crctype;
7659
7660 /* return error if TTY interface open */
7661 if (info->port.count)
7662 return -EBUSY;
7663
7664 switch (encoding)
7665 {
7666 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7667 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7668 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7669 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7670 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7671 default: return -EINVAL;
7672 }
7673
7674 switch (parity)
7675 {
7676 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7677 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7678 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7679 default: return -EINVAL;
7680 }
7681
7682 info->params.encoding = new_encoding;
7683 info->params.crc_type = new_crctype;
7684
7685 /* if network interface up, reprogram hardware */
7686 if (info->netcount)
7687 mgsl_program_hw(info);
7688
7689 return 0;
7690 }
7691
7692 /**
7693 * called by generic HDLC layer to send frame
7694 *
7695 * skb socket buffer containing HDLC frame
7696 * dev pointer to network device structure
7697 */
7698 static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7699 struct net_device *dev)
7700 {
7701 struct mgsl_struct *info = dev_to_port(dev);
7702 unsigned long flags;
7703
7704 if (debug_level >= DEBUG_LEVEL_INFO)
7705 		printk(KERN_INFO "%s:hdlcdev_xmit(%s)\n",__FILE__,dev->name);
7706
7707 /* stop sending until this frame completes */
7708 netif_stop_queue(dev);
7709
7710 /* copy data to device buffers */
7711 info->xmit_cnt = skb->len;
7712 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7713
7714 /* update network statistics */
7715 dev->stats.tx_packets++;
7716 dev->stats.tx_bytes += skb->len;
7717
7718 /* done with socket buffer, so free it */
7719 dev_kfree_skb(skb);
7720
7721 /* save start time for transmit timeout detection */
7722 dev->trans_start = jiffies;
7723
7724 /* start hardware transmitter if necessary */
7725 spin_lock_irqsave(&info->irq_spinlock,flags);
7726 if (!info->tx_active)
7727 usc_start_transmitter(info);
7728 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7729
7730 return NETDEV_TX_OK;
7731 }
7732
7733 /**
7734 * called by network layer when interface enabled
7735 * claim resources and initialize hardware
7736 *
7737 * dev pointer to network device structure
7738 *
7739 * returns 0 if success, otherwise error code
7740 */
7741 static int hdlcdev_open(struct net_device *dev)
7742 {
7743 struct mgsl_struct *info = dev_to_port(dev);
7744 int rc;
7745 unsigned long flags;
7746
7747 if (debug_level >= DEBUG_LEVEL_INFO)
7748 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7749
7750 /* generic HDLC layer open processing */
7751 if ((rc = hdlc_open(dev)))
7752 return rc;
7753
7754 /* arbitrate between network and tty opens */
7755 spin_lock_irqsave(&info->netlock, flags);
7756 if (info->port.count != 0 || info->netcount != 0) {
7757 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7758 spin_unlock_irqrestore(&info->netlock, flags);
7759 return -EBUSY;
7760 }
7761 info->netcount=1;
7762 spin_unlock_irqrestore(&info->netlock, flags);
7763
7764 /* claim resources and init adapter */
7765 if ((rc = startup(info)) != 0) {
7766 spin_lock_irqsave(&info->netlock, flags);
7767 info->netcount=0;
7768 spin_unlock_irqrestore(&info->netlock, flags);
7769 return rc;
7770 }
7771
7772 /* assert DTR and RTS, apply hardware settings */
7773 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7774 mgsl_program_hw(info);
7775
7776 /* enable network layer transmit */
7777 dev->trans_start = jiffies;
7778 netif_start_queue(dev);
7779
7780 /* inform generic HDLC layer of current DCD status */
7781 spin_lock_irqsave(&info->irq_spinlock, flags);
7782 usc_get_serial_signals(info);
7783 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7784 if (info->serial_signals & SerialSignal_DCD)
7785 netif_carrier_on(dev);
7786 else
7787 netif_carrier_off(dev);
7788 return 0;
7789 }
7790
7791 /**
7792 * called by network layer when interface is disabled
7793 * shutdown hardware and release resources
7794 *
7795 * dev pointer to network device structure
7796 *
7797 * returns 0 if success, otherwise error code
7798 */
7799 static int hdlcdev_close(struct net_device *dev)
7800 {
7801 struct mgsl_struct *info = dev_to_port(dev);
7802 unsigned long flags;
7803
7804 if (debug_level >= DEBUG_LEVEL_INFO)
7805 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7806
7807 netif_stop_queue(dev);
7808
7809 /* shutdown adapter and release resources */
7810 shutdown(info);
7811
7812 hdlc_close(dev);
7813
7814 spin_lock_irqsave(&info->netlock, flags);
7815 info->netcount=0;
7816 spin_unlock_irqrestore(&info->netlock, flags);
7817
7818 return 0;
7819 }
7820
7821 /**
7822 * called by network layer to process IOCTL call to network device
7823 *
7824 * dev pointer to network device structure
7825 * ifr pointer to network interface request structure
7826 * cmd IOCTL command code
7827 *
7828 * returns 0 if success, otherwise error code
7829 */
7830 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7831 {
7832 const size_t size = sizeof(sync_serial_settings);
7833 sync_serial_settings new_line;
7834 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7835 struct mgsl_struct *info = dev_to_port(dev);
7836 unsigned int flags;
7837
7838 if (debug_level >= DEBUG_LEVEL_INFO)
7839 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7840
7841 /* return error if TTY interface open */
7842 if (info->port.count)
7843 return -EBUSY;
7844
7845 if (cmd != SIOCWANDEV)
7846 return hdlc_ioctl(dev, ifr, cmd);
7847
7848 switch(ifr->ifr_settings.type) {
7849 case IF_GET_IFACE: /* return current sync_serial_settings */
7850
7851 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7852 if (ifr->ifr_settings.size < size) {
7853 ifr->ifr_settings.size = size; /* data size wanted */
7854 return -ENOBUFS;
7855 }
7856
7857 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7858 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7859 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7860 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7861
7862 switch (flags){
7863 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7864 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7865 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7866 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7867 default: new_line.clock_type = CLOCK_DEFAULT;
7868 }
7869
7870 new_line.clock_rate = info->params.clock_speed;
7871 new_line.loopback = info->params.loopback ? 1:0;
7872
7873 if (copy_to_user(line, &new_line, size))
7874 return -EFAULT;
7875 return 0;
7876
7877 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7878
7879 if(!capable(CAP_NET_ADMIN))
7880 return -EPERM;
7881 if (copy_from_user(&new_line, line, size))
7882 return -EFAULT;
7883
7884 switch (new_line.clock_type)
7885 {
7886 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7887 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7888 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7889 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7890 case CLOCK_DEFAULT: flags = info->params.flags &
7891 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7892 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7893 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7894 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7895 default: return -EINVAL;
7896 }
7897
7898 if (new_line.loopback != 0 && new_line.loopback != 1)
7899 return -EINVAL;
7900
7901 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7902 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7903 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7904 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7905 info->params.flags |= flags;
7906
7907 info->params.loopback = new_line.loopback;
7908
7909 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7910 info->params.clock_speed = new_line.clock_rate;
7911 else
7912 info->params.clock_speed = 0;
7913
7914 /* if network interface up, reprogram hardware */
7915 if (info->netcount)
7916 mgsl_program_hw(info);
7917 return 0;
7918
7919 default:
7920 return hdlc_ioctl(dev, ifr, cmd);
7921 }
7922 }
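
/* Illustrative user-space sketch (editorial; assumes the standard
 * generic HDLC ioctl interface as used by sethdlc, not code from this
 * driver; header names can differ across kernel versions). Selecting
 * internal clocking at 256 kbps on interface "hdlc0" might look like:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if.h>
 *	#include <linux/hdlc.h>
 *
 *	struct ifreq ifr;
 *	sync_serial_settings line = {
 *		.clock_rate = 256000,
 *		.clock_type = CLOCK_INT,	// BRG, see mapping above
 *		.loopback   = 0,
 *	};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ);
 *	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *	ifr.ifr_settings.size = sizeof(line);
 *	ifr.ifr_settings.ifs_ifsu.sync = &line;
 *	ioctl(fd, SIOCWANDEV, &ifr);
 *
 * This lands in the IF_IFACE_SYNC_SERIAL case above and programs
 * HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG with clock_speed 256000.
 */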
7923
7924 /**
7925 * called by network layer when transmit timeout is detected
7926 *
7927 * dev pointer to network device structure
7928 */
7929 static void hdlcdev_tx_timeout(struct net_device *dev)
7930 {
7931 struct mgsl_struct *info = dev_to_port(dev);
7932 unsigned long flags;
7933
7934 if (debug_level >= DEBUG_LEVEL_INFO)
7935 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7936
7937 dev->stats.tx_errors++;
7938 dev->stats.tx_aborted_errors++;
7939
7940 spin_lock_irqsave(&info->irq_spinlock,flags);
7941 usc_stop_transmitter(info);
7942 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7943
7944 netif_wake_queue(dev);
7945 }
7946
7947 /**
7948 * called by device driver when transmit completes
7949 * reenable network layer transmit if stopped
7950 *
7951 * info pointer to device instance information
7952 */
7953 static void hdlcdev_tx_done(struct mgsl_struct *info)
7954 {
7955 if (netif_queue_stopped(info->netdev))
7956 netif_wake_queue(info->netdev);
7957 }
7958
7959 /**
7960 * called by device driver when frame received
7961 * pass frame to network layer
7962 *
7963 * info pointer to device instance information
7964  * buf  pointer to buffer containing frame data
7965 * size count of data bytes in buf
7966 */
7967 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7968 {
7969 struct sk_buff *skb = dev_alloc_skb(size);
7970 struct net_device *dev = info->netdev;
7971
7972 if (debug_level >= DEBUG_LEVEL_INFO)
7973 printk("hdlcdev_rx(%s)\n", dev->name);
7974
7975 if (skb == NULL) {
7976 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7977 dev->name);
7978 dev->stats.rx_dropped++;
7979 return;
7980 }
7981
7982 memcpy(skb_put(skb, size), buf, size);
7983
7984 skb->protocol = hdlc_type_trans(skb, dev);
7985
7986 dev->stats.rx_packets++;
7987 dev->stats.rx_bytes += size;
7988
7989 netif_rx(skb);
7990 }
7991
7992 static const struct net_device_ops hdlcdev_ops = {
7993 .ndo_open = hdlcdev_open,
7994 .ndo_stop = hdlcdev_close,
7995 .ndo_change_mtu = hdlc_change_mtu,
7996 .ndo_start_xmit = hdlc_start_xmit,
7997 .ndo_do_ioctl = hdlcdev_ioctl,
7998 .ndo_tx_timeout = hdlcdev_tx_timeout,
7999 };
8000
8001 /**
8002 * called by device driver when adding device instance
8003 * do generic HDLC initialization
8004 *
8005 * info pointer to device instance information
8006 *
8007 * returns 0 if success, otherwise error code
8008 */
8009 static int hdlcdev_init(struct mgsl_struct *info)
8010 {
8011 int rc;
8012 struct net_device *dev;
8013 hdlc_device *hdlc;
8014
8015 /* allocate and initialize network and HDLC layer objects */
8016
8017 if (!(dev = alloc_hdlcdev(info))) {
8018 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8019 return -ENOMEM;
8020 }
8021
8022 /* for network layer reporting purposes only */
8023 dev->base_addr = info->io_base;
8024 dev->irq = info->irq_level;
8025 dev->dma = info->dma_level;
8026
8027 /* network layer callbacks and settings */
8028 dev->netdev_ops = &hdlcdev_ops;
8029 dev->watchdog_timeo = 10 * HZ;
8030 dev->tx_queue_len = 50;
8031
8032 /* generic HDLC layer callbacks and settings */
8033 hdlc = dev_to_hdlc(dev);
8034 hdlc->attach = hdlcdev_attach;
8035 hdlc->xmit = hdlcdev_xmit;
8036
8037 /* register objects with HDLC layer */
8038 if ((rc = register_hdlc_device(dev))) {
8039 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8040 free_netdev(dev);
8041 return rc;
8042 }
8043
8044 info->netdev = dev;
8045 return 0;
8046 }
8047
8048 /**
8049 * called by device driver when removing device instance
8050 * do generic HDLC cleanup
8051 *
8052 * info pointer to device instance information
8053 */
8054 static void hdlcdev_exit(struct mgsl_struct *info)
8055 {
8056 unregister_hdlc_device(info->netdev);
8057 free_netdev(info->netdev);
8058 info->netdev = NULL;
8059 }
8060
8061 #endif /* SYNCLINK_GENERIC_HDLC */
8062
8063
8064 static int __devinit synclink_init_one (struct pci_dev *dev,
8065 const struct pci_device_id *ent)
8066 {
8067 struct mgsl_struct *info;
8068
8069 if (pci_enable_device(dev)) {
8070 printk("error enabling pci device %p\n", dev);
8071 return -EIO;
8072 }
8073
8074 if (!(info = mgsl_allocate_device())) {
8075 printk("can't allocate device instance data.\n");
8076 return -EIO;
8077 }
8078
8079 /* Copy user configuration info to device instance data */
8080
8081 info->io_base = pci_resource_start(dev, 2);
8082 info->irq_level = dev->irq;
8083 info->phys_memory_base = pci_resource_start(dev, 3);
8084
8085 	/* Because ioremap only works on page boundaries we must map
8086 * a larger area than is actually implemented for the LCR
8087 * memory range. We map a full page starting at the page boundary.
8088 */
8089 info->phys_lcr_base = pci_resource_start(dev, 0);
8090 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8091 info->phys_lcr_base &= ~(PAGE_SIZE-1);
8092
8093 info->bus_type = MGSL_BUS_TYPE_PCI;
8094 info->io_addr_size = 8;
8095 info->irq_flags = IRQF_SHARED;
8096
8097 if (dev->device == 0x0210) {
8098 /* Version 1 PCI9030 based universal PCI adapter */
8099 info->misc_ctrl_value = 0x007c4080;
8100 info->hw_version = 1;
8101 } else {
8102 /* Version 0 PCI9050 based 5V PCI adapter
8103 * A PCI9050 bug prevents reading LCR registers if
8104 * LCR base address bit 7 is set. Maintain shadow
8105 * value so we can write to LCR misc control reg.
8106 */
8107 info->misc_ctrl_value = 0x087e4546;
8108 info->hw_version = 0;
8109 }
8110
8111 mgsl_add_device(info);
8112
8113 return 0;
8114 }
8115
8116 static void __devexit synclink_remove_one (struct pci_dev *dev)
8117 {
8118 }
8119
8120