1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9 /*
10 * This is a lower level module for the modular serial I/O driver. This
11 * module implements all hardware dependent functions for doing serial
12 * I/O on the IOC4 serial ports.
13 */
14
15 #include <linux/config.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19 #include <asm/sn/types.h>
20 #include <asm/sn/sgi.h>
21 #include <asm/sn/invent.h>
22 #include <asm/sn/driver.h>
23 #include <asm/sn/iograph.h>
24 #include <asm/param.h>
25 #include <asm/atomic.h>
26 #include <asm/delay.h>
27 #include <asm/semaphore.h>
28 #include <asm/sn/pio.h>
29 #include <asm/sn/xtalk/xwidget.h>
30 #include <asm/sn/io.h>
31 #include <asm/sn/pci/pci_defs.h>
32 #include <asm/sn/pci/pciio.h>
33 #include <asm/sn/ioc4.h>
34 #include <asm/sn/serialio.h>
35 #include <asm/sn/uart16550.h>
36
37 /* #define IOC4_SIO_DEBUG */
38 /* #define USE_64BIT_DMA */
39
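/* Interrupt bits that are both asserted in sio_ir and currently enabled
 * (ip_ienb) for this port.
 */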
40 #define PENDING(port) (PCI_INW(&(port)->ip_ioc4->sio_ir) & port->ip_ienb)
41
42 /* Default to 4k buffers */
43 #ifdef IOC4_1K_BUFFERS
44 #define RING_BUF_SIZE 1024
45 #define IOC4_BUF_SIZE_BIT 0
46 #define PROD_CONS_MASK IOC4_PROD_CONS_PTR_1K
47 #else
48 #define RING_BUF_SIZE 4096
49 #define IOC4_BUF_SIZE_BIT IOC4_SBBR_L_SIZE
50 #define PROD_CONS_MASK IOC4_PROD_CONS_PTR_4K
51 #endif
52
53 #define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
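/* With the default 4 KB rings this is 16 KB: one TX and one RX ring for
 * each port of a pair (see struct ring_buffer below), each ring holding
 * 512 eight-byte entries.
 */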
54
55 #if PAGE_SIZE < TOTAL_RING_BUF_SIZE
56 #include <sys/pfdat.h>
57 #endif
58
59
60 #ifdef DPRINTF
61 #define dprintf(x) printk x
62 #else
63 #define dprintf(x)
64 #endif
65
66 #define NEWA(ptr,n) (ptr = snia_kmem_zalloc((n)*sizeof (*(ptr))))
67
68 #define contig_memalloc(a,b,c) kmem_zalloc(PAGE_SIZE * (a))
69 #define sio_port_islocked(a) 0 // FIXME: ?????
70
71 #define KM_PHYSCONTIG 0x0008
72 #define VM_DIRECT KM_PHYSCONTIG
73 #define VM_PHYSCONTIG KM_PHYSCONTIG
74
75 #ifdef DEBUG
76 #define PROGRESS() printk("%s : %d\n", __FUNCTION__, __LINE__)
77 #define NOT_PROGRESS() printk("%s : %d - Error\n", __FUNCTION__, __LINE__)
78 #else
79 #define PROGRESS() ;
80 #define NOT_PROGRESS() ;
81 #endif
82
83 static __inline__ void *
84 kvpalloc(size_t size, int flags, int colour)
85 {
86 if (flags & (VM_DIRECT|VM_PHYSCONTIG)) {
87 int order = 0;
88 while ((PAGE_SIZE << order) < (size << PAGE_SHIFT))
89 order++;
90 return (void *) __get_free_pages(GFP_KERNEL, order);
91 } else
92 return vmalloc(size << PAGE_SHIFT);
93 }
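/* A sketch of the usage seen later in this file: kvpalloc(1, VM_DIRECT, 0)
 * computes order 0 and returns one physically contiguous page from the
 * buddy allocator; requests without VM_DIRECT/VM_PHYSCONTIG fall through
 * to vmalloc(). Note that size is in pages, hence the PAGE_SHIFT scaling.
 */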
94
95 /* Local port info for the IOC4 serial ports. This contains as its
96 * first member the global sio port private data.
97 */
98 typedef struct ioc4port {
99 sioport_t ip_sioport; /* Must be first struct entry! */
100
101 vertex_hdl_t ip_conn_vhdl; /* vhdl to use for pciio requests */
102 vertex_hdl_t ip_port_vhdl; /* vhdl for the serial port */
103
104 /* Base piomap addr of the ioc4 board this port is on
105 * and associated serial map; serial map includes uart registers.
106 */
107 ioc4_mem_t *ip_ioc4;
108 ioc4_sregs_t *ip_serial;
109 ioc4_uart_t *ip_uart;
110
111 /* Ring buffer page for this port */
112 caddr_t ip_ring_buf_k0; /* Ring buffer location in K0 space */
113
114 /* Rings for this port */
115 struct ring *ip_inring;
116 struct ring *ip_outring;
117
118 /* Hook to port specific values for this port */
119 struct hooks *ip_hooks;
120
121 int ip_flags;
122
123 /* Cache of DCD/CTS bits last received */
124 char ip_modem_bits;
125
126 /* Various rx/tx parameters */
127 int ip_baud;
128 int ip_tx_lowat;
129 int ip_rx_timeout;
130
131 /* Copy of notification bits */
132 int ip_notify;
133
134 /* Shadow copies of various registers so we don't need to PIO
135 * read them constantly
136 */
137 ioc4reg_t ip_ienb; /* Enabled interrupts */
138
139 ioc4reg_t ip_sscr;
140
141 ioc4reg_t ip_tx_prod;
142 ioc4reg_t ip_rx_cons;
143
144 /* Back pointer to ioc4 soft area */
145 void *ip_ioc4_soft;
146 } ioc4port_t;
147
148 #if DEBUG
149 #define MAXSAVEPORT 256
150 static int next_saveport = 0;
151 static ioc4port_t *saveport[MAXSAVEPORT];
152 #endif
153
154 /* TX low water mark. We need to notify the driver whenever TX is getting
155 * close to empty so it can refill the TX buffer and keep things going.
156 * Let's assume that if we interrupt 1 ms before the TX goes idle, we'll
157 * have no trouble getting in more chars in time (I certainly hope so).
158 */
159 #define TX_LOWAT_LATENCY 1000
160 #define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
161 #define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
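/* Worked example of the arithmetic: at 115200 baud the UART moves about
 * 11520 chars/sec, so TX_LOWAT_CHARS(115200) = 115200/10/1000 = 11 chars
 * left in the ring 1 ms before the transmitter goes idle. At low baud
 * rates integer division yields 0; ioc4_config() clamps the resulting
 * lowat to at least one ring entry.
 */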
162
163 /* Flags per port */
164 #define INPUT_HIGH 0x01
165 #define DCD_ON 0x02
166 #define LOWAT_WRITTEN 0x04
167 #define READ_ABORTED 0x08
168 #define TX_DISABLED 0x10
169
170 /* Get local port type from global sio port type */
171 #define LPORT(port) ((ioc4port_t *) (port))
172
173 /* Get global port from local port type */
174 #define GPORT(port) ((sioport_t *) (port))
175
176 /* Since each port has different register offsets and bitmasks
177 * for everything, we'll store those that we need in tables so we
178 * don't have to be constantly checking the port we are dealing with.
179 */
180 struct hooks {
181 ioc4reg_t intr_delta_dcd;
182 ioc4reg_t intr_delta_cts;
183 ioc4reg_t intr_tx_mt;
184 ioc4reg_t intr_rx_timer;
185 ioc4reg_t intr_rx_high;
186 ioc4reg_t intr_tx_explicit;
187 ioc4reg_t intr_dma_error;
188 ioc4reg_t intr_clear;
189 ioc4reg_t intr_all;
190 char rs422_select_pin;
191 };
192
193 static struct hooks hooks_array[4] =
194 {
195 /* Values for port 0 */
196 {
197 IOC4_SIO_IR_S0_DELTA_DCD,
198 IOC4_SIO_IR_S0_DELTA_CTS,
199 IOC4_SIO_IR_S0_TX_MT,
200 IOC4_SIO_IR_S0_RX_TIMER,
201 IOC4_SIO_IR_S0_RX_HIGH,
202 IOC4_SIO_IR_S0_TX_EXPLICIT,
203 IOC4_OTHER_IR_S0_MEMERR,
204 (IOC4_SIO_IR_S0_TX_MT | IOC4_SIO_IR_S0_RX_FULL |
205 IOC4_SIO_IR_S0_RX_HIGH | IOC4_SIO_IR_S0_RX_TIMER |
206 IOC4_SIO_IR_S0_DELTA_DCD | IOC4_SIO_IR_S0_DELTA_CTS |
207 IOC4_SIO_IR_S0_INT | IOC4_SIO_IR_S0_TX_EXPLICIT),
208 IOC4_SIO_IR_S0,
209 IOC4_GPPR_UART0_MODESEL_PIN,
210 },
211
212 /* Values for port 1 */
213 {
214 IOC4_SIO_IR_S1_DELTA_DCD,
215 IOC4_SIO_IR_S1_DELTA_CTS,
216 IOC4_SIO_IR_S1_TX_MT,
217 IOC4_SIO_IR_S1_RX_TIMER,
218 IOC4_SIO_IR_S1_RX_HIGH,
219 IOC4_SIO_IR_S1_TX_EXPLICIT,
220 IOC4_OTHER_IR_S1_MEMERR,
221 (IOC4_SIO_IR_S1_TX_MT | IOC4_SIO_IR_S1_RX_FULL |
222 IOC4_SIO_IR_S1_RX_HIGH | IOC4_SIO_IR_S1_RX_TIMER |
223 IOC4_SIO_IR_S1_DELTA_DCD | IOC4_SIO_IR_S1_DELTA_CTS |
224 IOC4_SIO_IR_S1_INT | IOC4_SIO_IR_S1_TX_EXPLICIT),
225 IOC4_SIO_IR_S1,
226 IOC4_GPPR_UART1_MODESEL_PIN,
227 },
228
229 /* Values for port 2 */
230 {
231 IOC4_SIO_IR_S2_DELTA_DCD,
232 IOC4_SIO_IR_S2_DELTA_CTS,
233 IOC4_SIO_IR_S2_TX_MT,
234 IOC4_SIO_IR_S2_RX_TIMER,
235 IOC4_SIO_IR_S2_RX_HIGH,
236 IOC4_SIO_IR_S2_TX_EXPLICIT,
237 IOC4_OTHER_IR_S2_MEMERR,
238 (IOC4_SIO_IR_S2_TX_MT | IOC4_SIO_IR_S2_RX_FULL |
239 IOC4_SIO_IR_S2_RX_HIGH | IOC4_SIO_IR_S2_RX_TIMER |
240 IOC4_SIO_IR_S2_DELTA_DCD | IOC4_SIO_IR_S2_DELTA_CTS |
241 IOC4_SIO_IR_S2_INT | IOC4_SIO_IR_S2_TX_EXPLICIT),
242 IOC4_SIO_IR_S2,
243 IOC4_GPPR_UART2_MODESEL_PIN,
244 },
245
246 /* Values for port 3 */
247 {
248 IOC4_SIO_IR_S3_DELTA_DCD,
249 IOC4_SIO_IR_S3_DELTA_CTS,
250 IOC4_SIO_IR_S3_TX_MT,
251 IOC4_SIO_IR_S3_RX_TIMER,
252 IOC4_SIO_IR_S3_RX_HIGH,
253 IOC4_SIO_IR_S3_TX_EXPLICIT,
254 IOC4_OTHER_IR_S3_MEMERR,
255 (IOC4_SIO_IR_S3_TX_MT | IOC4_SIO_IR_S3_RX_FULL |
256 IOC4_SIO_IR_S3_RX_HIGH | IOC4_SIO_IR_S3_RX_TIMER |
257 IOC4_SIO_IR_S3_DELTA_DCD | IOC4_SIO_IR_S3_DELTA_CTS |
258 IOC4_SIO_IR_S3_INT | IOC4_SIO_IR_S3_TX_EXPLICIT),
259 IOC4_SIO_IR_S3,
260 IOC4_GPPR_UART3_MODESEL_PIN,
261 }
262 };
263
264 /* Macros to get into the port hooks. Require a variable called
265 * hooks set to port->hooks
266 */
267 #define H_INTR_TX_MT hooks->intr_tx_mt
268 #define H_INTR_RX_TIMER hooks->intr_rx_timer
269 #define H_INTR_RX_HIGH hooks->intr_rx_high
270 #define H_INTR_TX_EXPLICIT hooks->intr_tx_explicit
271 #define H_INTR_DMA_ERROR hooks->intr_dma_error
272 #define H_INTR_CLEAR hooks->intr_clear
273 #define H_INTR_DELTA_DCD hooks->intr_delta_dcd
274 #define H_INTR_DELTA_CTS hooks->intr_delta_cts
275 #define H_INTR_ALL hooks->intr_all
276 #define H_RS422 hooks->rs422_select_pin
277
278 /* A ring buffer entry */
279 struct ring_entry {
280 union {
281 struct {
282 uint32_t alldata;
283 uint32_t allsc;
284 } all;
285 struct {
286 char data[4]; /* data bytes */
287 char sc[4]; /* status/control */
288 } s;
289 } u;
290 };
291
292 /* Test the valid bits in any of the 4 sc chars using "allsc" member */
293 #define RING_ANY_VALID \
294 ((uint32_t) (IOC4_RXSB_MODEM_VALID | IOC4_RXSB_DATA_VALID) * 0x01010101)
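/* Multiplying the one-byte mask by 0x01010101 replicates it into all four
 * status/control byte lanes, so a single 32-bit test of "allsc" covers
 * the whole ring entry.
 */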
295
296 #define ring_sc u.s.sc
297 #define ring_data u.s.data
298 #define ring_allsc u.all.allsc
299
300 /* Number of entries per ring buffer. */
301 #define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
302
303 /* An individual ring */
304 struct ring {
305 struct ring_entry entries[ENTRIES_PER_RING];
306 };
307
308 /* The whole enchilada */
309 struct ring_buffer {
310 struct ring TX_0_OR_2;
311 struct ring RX_0_OR_2;
312 struct ring TX_1_OR_3;
313 struct ring RX_1_OR_3;
314 };
315
316 /* Get a ring from a port struct */
317 #define RING(port, which) \
318 &(((struct ring_buffer *) ((port)->ip_ring_buf_k0))->which)
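/* E.g. RING(port, RX_0_OR_2) yields the RX ring this port shares with its
 * pair within the ring buffer page (see its use in ioc4_serial_attach()).
 */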
319
320 /* Local functions: */
321 static int ioc4_open (sioport_t *port);
322 static int ioc4_config (sioport_t *port, int baud, int byte_size,
323 int stop_bits, int parenb, int parodd);
324 static int ioc4_enable_hfc (sioport_t *port, int enable);
325 static int ioc4_set_extclk (sioport_t *port, int clock_factor);
326
327 /* Data transmission */
328 static int do_ioc4_write (sioport_t *port, char *buf, int len);
329 static int ioc4_write (sioport_t *port, char *buf, int len);
330 static int ioc4_sync_write (sioport_t *port, char *buf, int len);
331 static void ioc4_wrflush (sioport_t *port);
332 static int ioc4_break (sioport_t *port, int brk);
333 static int ioc4_enable_tx (sioport_t *port, int enb);
334
335 /* Data reception */
336 static int ioc4_read (sioport_t *port, char *buf, int len);
337
338 /* Event notification */
339 static int ioc4_notification (sioport_t *port, int mask, int on);
340 static int ioc4_rx_timeout (sioport_t *port, int timeout);
341
342 /* Modem control */
343 static int ioc4_set_DTR (sioport_t *port, int dtr);
344 static int ioc4_set_RTS (sioport_t *port, int rts);
345 static int ioc4_query_DCD (sioport_t *port);
346 static int ioc4_query_CTS (sioport_t *port);
347
348 /* Output mode */
349 static int ioc4_set_proto (sioport_t *port, enum sio_proto proto);
350
351 /* User mapped driver support */
352 static int ioc4_get_mapid (sioport_t *port, void *arg);
353 static int ioc4_set_sscr (sioport_t *port, int arg, int flag);
354
355 static struct serial_calldown ioc4_calldown = {
356 ioc4_open,
357 ioc4_config,
358 ioc4_enable_hfc,
359 ioc4_set_extclk,
360 ioc4_write,
361 ioc4_sync_write,
362 ioc4_wrflush, /* du flush */
363 ioc4_break,
364 ioc4_enable_tx,
365 ioc4_read,
366 ioc4_notification,
367 ioc4_rx_timeout,
368 ioc4_set_DTR,
369 ioc4_set_RTS,
370 ioc4_query_DCD,
371 ioc4_query_CTS,
372 ioc4_set_proto,
373 ioc4_get_mapid,
374 0,
375 0,
376 ioc4_set_sscr
377 };
378
379 /* Baud rate stuff */
380 #define SET_BAUD(p, b) set_baud_ti(p, b)
381 static int set_baud_ti(ioc4port_t *, int);
382
383 #ifdef DEBUG
384 /* Performance characterization logging */
385 #define DEBUGINC(x,i) stats.x += i
386
387 static struct {
388
389 /* Ports present */
390 uint ports;
391
392 /* Ports killed */
393 uint killed;
394
395 /* Interrupt counts */
396 uint total_intr;
397 uint port_0_intr;
398 uint port_1_intr;
399 uint ddcd_intr;
400 uint dcts_intr;
401 uint rx_timer_intr;
402 uint rx_high_intr;
403 uint explicit_intr;
404 uint mt_intr;
405 uint mt_lowat_intr;
406
407 /* Write characteristics */
408 uint write_bytes;
409 uint write_cnt;
410 uint wrote_bytes;
411 uint tx_buf_used;
412 uint tx_buf_cnt;
413 uint tx_pio_cnt;
414
415 /* Read characteristics */
416 uint read_bytes;
417 uint read_cnt;
418 uint drain;
419 uint drainwait;
420 uint resetdma;
421 uint read_ddcd;
422 uint rx_overrun;
423 uint parity;
424 uint framing;
425 uint brk;
426 uint red_bytes;
427 uint rx_buf_used;
428 uint rx_buf_cnt;
429
430 /* Errors */
431 uint dma_lost;
432 uint read_aborted;
433 uint read_aborted_detected;
434 } stats;
435
436 #else
437 #define DEBUGINC(x,i)
438 #endif
439
440 /* Infinite loop detection.
441 */
442 #define MAXITER 1000000
443 #define SPIN(cond, success) \
444 { \
445 int spiniter = 0; \
446 success = 1; \
447 while(cond) { \
448 spiniter++; \
449 if (spiniter > MAXITER) { \
450 success = 0; \
451 break; \
452 } \
453 } \
454 }
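/* Typical use, as in ioc4_open() below -- spin until a DMA pause takes
 * effect, bailing out if the hardware never acknowledges it:
 *
 * SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
 * spin_success);
 * if (!spin_success)
 * return(-1);
 */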
455
456
457 static iopaddr_t
458 ring_dmatrans(vertex_hdl_t conn_vhdl, caddr_t vaddr)
459 {
460 extern iopaddr_t pciio_dma_addr (vertex_hdl_t, device_desc_t, paddr_t,
461 size_t, pciio_dmamap_t *, unsigned);
462 iopaddr_t paddr = (iopaddr_t)vaddr;
463
464 if (conn_vhdl != GRAPH_VERTEX_NONE)
465 #ifdef USE_64BIT_DMA
466 /* Use 64-bit DMA address when the IOC4 supports it */
467 return pciio_dmatrans_addr (conn_vhdl, 0, paddr, TOTAL_RING_BUF_SIZE, PCIIO_DMA_A64 | PCIIO_BYTE_STREAM);
468
469 #else
470 /* Use 32-bit DMA address for current IOC4 */
471 return pciio_dma_addr (conn_vhdl, 0, paddr, TOTAL_RING_BUF_SIZE, NULL, PCIIO_BYTE_STREAM);
472 #endif
473
474 return paddr;
475 }
476
477
478 /* If the interrupt routine ever called enable_intrs(), a
479 * mask_enable_intrs() counterpart to mask_disable_intrs() would be needed.
480 */
481 static inline void
482 mask_disable_intrs(ioc4port_t *port, ioc4reg_t mask)
483 {
484 port->ip_ienb &= ~mask;
485 }
486
487
488 static void
489 enable_intrs(ioc4port_t *port, ioc4reg_t mask)
490 {
491 struct hooks *hooks = port->ip_hooks;
492
493 if ((port->ip_ienb & mask) != mask) {
494 IOC4_WRITE_IES(port->ip_ioc4_soft, mask, ioc4_sio_intr_type);
495 port->ip_ienb |= mask;
496 }
497
498 if (port->ip_ienb)
499 IOC4_WRITE_IES(port->ip_ioc4_soft, H_INTR_DMA_ERROR, ioc4_other_intr_type);
500 }
501
502
503 static void
504 disable_intrs(ioc4port_t *port, ioc4reg_t mask)
505 {
506 struct hooks *hooks = port->ip_hooks;
507
508 if (port->ip_ienb & mask) {
509 IOC4_WRITE_IEC(port->ip_ioc4_soft, mask, ioc4_sio_intr_type);
510 port->ip_ienb &= ~mask;
511 }
512
513 if (!port->ip_ienb)
514 IOC4_WRITE_IEC(port->ip_ioc4_soft, H_INTR_DMA_ERROR, ioc4_other_intr_type);
515 }
516
517
518 /* Service any pending interrupts on the given port */
519 static void
520 ioc4_serial_intr(intr_arg_t arg, ioc4reg_t sio_ir)
521 {
522 ioc4port_t *port = (ioc4port_t *) arg;
523 sioport_t *gp = GPORT(port);
524 struct hooks *hooks = port->ip_hooks;
525 unsigned rx_high_rd_aborted = 0;
526 unsigned int flags;
527
528 PROGRESS();
529 #ifdef NOT_YET
530 ASSERT(sio_port_islocked(gp) == 0);
531 #endif
532
533 /* Possible race condition here: The TX_MT interrupt bit may be
534 * cleared without the intervention of the interrupt handler,
535 * e.g. by a write. If the top level interrupt handler reads a
536 * TX_MT, then some other processor does a write, starting up
537 * output, then we come in here, see the TX_MT and stop DMA, the
538 * output started by the other processor will hang. Thus we can
539 * only rely on TX_MT being legitimate if it is read while the
540 * port lock is held. Therefore this bit must be ignored in the
541 * passed-in interrupt mask, which the top level interrupt handler
542 * read without holding the port lock. So we'll clear it for now,
543 * and reload it later once we have the port lock.
546 */
547 sio_ir &= ~(H_INTR_TX_MT);
548
549 SIO_LOCK_PORT(gp, flags);
550
551 dprintf(("interrupt: sio_ir 0x%x\n", sio_ir));
552
553 do {
554 ioc4reg_t shadow;
555
556 /* Handle a DCD change */
557 if (sio_ir & H_INTR_DELTA_DCD) {
558 DEBUGINC(ddcd_intr, 1);
559
560 PROGRESS();
561 /* ACK the interrupt */
562 PCI_OUTW(&port->ip_ioc4->sio_ir, H_INTR_DELTA_DCD);
563
564 /* If DCD has raised, notify upper layer. Otherwise
565 * wait for a record to be posted to notify of a dropped DCD.
566 */
567 shadow = PCI_INW(&port->ip_serial->shadow);
568
569 if (port->ip_notify & N_DDCD) {
570 PROGRESS();
571 if (shadow & IOC4_SHADOW_DCD) /* Notify upper layer of DCD */
572 UP_DDCD(gp, 1);
573 else
574 port->ip_flags |= DCD_ON; /* Flag delta DCD/no DCD */
575 }
576 }
577
578 /* Handle a CTS change */
579 if (sio_ir & H_INTR_DELTA_CTS) {
580 DEBUGINC(dcts_intr, 1);
581 PROGRESS();
582
583 /* ACK the interrupt */
584 PCI_OUTW(&port->ip_ioc4->sio_ir, H_INTR_DELTA_CTS);
585
586 shadow = PCI_INW(&port->ip_serial->shadow);
587
588 /* Notify upper layer */
589 if (port->ip_notify & N_DCTS) {
590 if (shadow & IOC4_SHADOW_CTS)
591 UP_DCTS(gp, 1);
592 else
593 UP_DCTS(gp, 0);
594 }
595 }
596
597 /* RX timeout interrupt. Must be some data available. Put this
598 * before the check for RX_HIGH since servicing this condition
599 * may cause that condition to clear.
600 */
601 if (sio_ir & H_INTR_RX_TIMER) {
602 PROGRESS();
603 DEBUGINC(rx_timer_intr, 1);
604
605 /* ACK the interrupt */
606 PCI_OUTW(&port->ip_ioc4->sio_ir, H_INTR_RX_TIMER);
607
608 if (port->ip_notify & N_DATA_READY)
609 UP_DATA_READY(gp);
610 }
611
612 /* RX high interrupt. Must be after RX_TIMER.
613 */
614 else if (sio_ir & H_INTR_RX_HIGH) {
615 DEBUGINC(rx_high_intr, 1);
616
617 PROGRESS();
618 /* Data available, notify upper layer */
619 if (port->ip_notify & N_DATA_READY)
620 UP_DATA_READY(gp);
621
622 /* We can't ACK this interrupt. If up_data_ready didn't
623 * cause the condition to clear, we'll have to disable
624 * the interrupt until the data is drained by the upper layer.
625 * If the read was aborted, don't disable the interrupt as
626 * this may cause us to hang indefinitely. An aborted read
627 * generally means that this interrupt hasn't been delivered
628 * to the cpu yet anyway, even though we see it as asserted
629 * when we read the sio_ir.
630 */
631 if ((sio_ir = PENDING(port)) & H_INTR_RX_HIGH) {
632 PROGRESS();
633 if ((port->ip_flags & READ_ABORTED) == 0) {
634 mask_disable_intrs(port, H_INTR_RX_HIGH);
635 port->ip_flags |= INPUT_HIGH;
636 }
637 else {
638 DEBUGINC(read_aborted_detected, 1);
639 /* We will be stuck in this loop forever,
640 * higher level will never get time to finish
641 */
642 rx_high_rd_aborted++;
643 }
644 }
645 }
646
647 /* We got a low water interrupt: notify upper layer to
648 * send more data. Must come before TX_MT since servicing
649 * this condition may cause that condition to clear.
650 */
651 if (sio_ir & H_INTR_TX_EXPLICIT) {
652 DEBUGINC(explicit_intr, 1);
653 PROGRESS();
654
655 port->ip_flags &= ~LOWAT_WRITTEN;
656
657 /* ACK the interrupt */
658 PCI_OUTW(&port->ip_ioc4->sio_ir, H_INTR_TX_EXPLICIT);
659
660 if (port->ip_notify & N_OUTPUT_LOWAT)
661 UP_OUTPUT_LOWAT(gp);
662 }
663
664 /* Handle TX_MT. Must come after TX_EXPLICIT.
665 */
666 else if (sio_ir & H_INTR_TX_MT) {
667 DEBUGINC(mt_intr, 1);
668 PROGRESS();
669
670 /* If the upper layer is expecting a lowat notification
671 * and we get to this point it probably means that for
672 * some reason the TX_EXPLICIT didn't work as expected
673 * (that can legitimately happen if the output buffer is
674 * filled up in just the right way). So send the notification
675 * now.
676 */
677 if (port->ip_notify & N_OUTPUT_LOWAT) {
678 DEBUGINC(mt_lowat_intr, 1);
679 PROGRESS();
680
681 if (port->ip_notify & N_OUTPUT_LOWAT)
682 UP_OUTPUT_LOWAT(gp);
683
684 /* We need to reload the sio_ir since the upcall may
685 * have caused another write to occur, clearing
686 * the TX_MT condition.
687 */
688 sio_ir = PENDING(port);
689 }
690
691 /* If the TX_MT condition still persists even after the upcall,
692 * we've got some work to do.
693 */
694 if (sio_ir & H_INTR_TX_MT) {
695
696 PROGRESS();
697
698 /* If we are not currently expecting DMA input, and the
699 * transmitter has just gone idle, there is no longer any
700 * reason for DMA, so disable it.
701 */
702 if (!(port->ip_notify & (N_DATA_READY | N_DDCD))) {
703 ASSERT(port->ip_sscr & IOC4_SSCR_DMA_EN);
704 port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
705 PCI_OUTW(&port->ip_serial->sscr, port->ip_sscr);
706 }
707
708 /* Prevent infinite TX_MT interrupt */
709 mask_disable_intrs(port, H_INTR_TX_MT);
710 }
711 }
712
713 sio_ir = PENDING(port);
714
715 /* if the read was aborted and only H_INTR_RX_HIGH,
716 * clear H_INTR_RX_HIGH, so we do not loop forever.
717 */
718
719 if ( rx_high_rd_aborted && (sio_ir == H_INTR_RX_HIGH) ) {
720 sio_ir &= ~H_INTR_RX_HIGH;
721 }
722 } while (sio_ir & H_INTR_ALL);
723
724 SIO_UNLOCK_PORT(gp, flags);
725
726 /* Re-enable interrupts before returning from interrupt handler.
727 * Getting interrupted here is okay. It'll just v() our semaphore, and
728 * we'll come through the loop again.
729 */
730
731 IOC4_WRITE_IES(port->ip_ioc4_soft, port->ip_ienb, ioc4_sio_intr_type);
732 }
733
734
735 /*ARGSUSED*/
736
737 /* Service any pending DMA error interrupts on the given port */
738 static void
739 ioc4_dma_error_intr(intr_arg_t arg, ioc4reg_t other_ir)
740 {
741 ioc4port_t *port = (ioc4port_t *) arg;
742 sioport_t *gp = GPORT(port);
743 struct hooks *hooks = port->ip_hooks;
744 unsigned int flags;
745
746 SIO_LOCK_PORT(gp, flags);
747
748 dprintf(("interrupt: other_ir 0x%x\n", other_ir));
749
750 /* ACK the interrupt */
751 PCI_OUTW(&port->ip_ioc4->other_ir, H_INTR_DMA_ERROR);
752
753 printk( "DMA error on serial port %p\n", (void *)port->ip_port_vhdl);
754
755 if (port->ip_ioc4->pci_err_addr_l & IOC4_PCI_ERR_ADDR_VLD) {
756 printk( "PCI error address is 0x%lx, master is serial port %c %s\n",
757 ((uint64_t) port->ip_ioc4->pci_err_addr_h << 32) |
758 (port->ip_ioc4->pci_err_addr_l & IOC4_PCI_ERR_ADDR_ADDR_MSK),
759 '1' + (char) ((port->ip_ioc4->pci_err_addr_l &
760 IOC4_PCI_ERR_ADDR_MST_NUM_MSK) >> 1),
761 (port->ip_ioc4->pci_err_addr_l & IOC4_PCI_ERR_ADDR_MST_TYP_MSK)
762 ? "RX" : "TX");
763
764 if (port->ip_ioc4->pci_err_addr_l & IOC4_PCI_ERR_ADDR_MUL_ERR)
765 printk( "Multiple errors occurred\n");
766 }
767
768 SIO_UNLOCK_PORT(gp, flags);
769
770 /* Re-enable DMA error interrupts */
771 IOC4_WRITE_IES(port->ip_ioc4_soft, H_INTR_DMA_ERROR, ioc4_other_intr_type);
772 }
773
774
775 /* Baud rate setting code */
776 static int
777 set_baud_ti(ioc4port_t *port, int baud)
778 {
779 int actual_baud;
780 int diff;
781 int lcr;
782 unsigned short divisor;
783
784 divisor = SER_DIVISOR(baud, IOC4_SER_XIN_CLK);
785 if (!divisor)
786 return(1);
787 actual_baud = DIVISOR_TO_BAUD(divisor, IOC4_SER_XIN_CLK);
788
789 diff = actual_baud - baud;
790 if (diff < 0)
791 diff = -diff;
792
793 /* If we're within 1%, we've found a match */
794 if (diff * 100 > actual_baud)
795 return(1);
796
797 lcr = PCI_INB(&port->ip_uart->i4u_lcr);
798
799 PCI_OUTB(&port->ip_uart->i4u_lcr, lcr | LCR_DLAB);
800
801 PCI_OUTB(&port->ip_uart->i4u_dll, (char) divisor);
802
803 PCI_OUTB(&port->ip_uart->i4u_dlm, (char) (divisor >> 8));
804
805 PCI_OUTB(&port->ip_uart->i4u_lcr, lcr);
806
807 return(0);
808 }
809
810
811 /* Initialize the sio and ioc4 hardware for a given port */
812 static int
813 hardware_init(ioc4port_t *port)
814 {
815 ioc4reg_t sio_cr;
816 struct hooks *hooks = port->ip_hooks;
817
818 DEBUGINC(ports, 1);
819
820 /* Idle the IOC4 serial interface */
821 PCI_OUTW(&port->ip_serial->sscr, IOC4_SSCR_RESET);
822
823 /* Wait until any pending bus activity for this port has ceased */
824 do sio_cr = PCI_INW(&port->ip_ioc4->sio_cr);
825 while(!(sio_cr & IOC4_SIO_CR_SIO_DIAG_IDLE));
826
827 /* Finish reset sequence */
828 PCI_OUTW(&port->ip_serial->sscr, 0);
829
830 /* Once RESET is done, reload cached tx_prod and rx_cons values
831 * and set rings to empty by making prod == cons
832 */
833 port->ip_tx_prod = PCI_INW(&port->ip_serial->stcir) & PROD_CONS_MASK;
834 PCI_OUTW(&port->ip_serial->stpir, port->ip_tx_prod);
835
836 port->ip_rx_cons = PCI_INW(&port->ip_serial->srpir) & PROD_CONS_MASK;
837 PCI_OUTW(&port->ip_serial->srcir, port->ip_rx_cons);
838
839 /* Disable interrupts for this 16550 */
840 PCI_OUTB(&port->ip_uart->i4u_lcr, 0); /* clear DLAB */
841 PCI_OUTB(&port->ip_uart->i4u_ier, 0);
842
843 /* Set the default baud */
844 SET_BAUD(port, port->ip_baud);
845
846 /* Set line control to 8 bits no parity */
847 PCI_OUTB(&port->ip_uart->i4u_lcr, LCR_BITS8 | LCR_1_STOP_BITS);
848
849 /* Enable the FIFOs */
850 PCI_OUTB(&port->ip_uart->i4u_fcr, FCR_FIFOEN);
851 /* then reset 16550 FIFOs */
852 PCI_OUTB(&port->ip_uart->i4u_fcr,
853 FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO);
854
855 /* Clear modem control register */
856 PCI_OUTB(&port->ip_uart->i4u_mcr, 0);
857
858 /* Clear deltas in modem status register */
859 PCI_INB(&port->ip_uart->i4u_msr);
860
861 /* Only do this once per port pair */
862 if (port->ip_hooks == &hooks_array[0] || port->ip_hooks == &hooks_array[2]) {
863 iopaddr_t ring_pci_addr;
864 volatile ioc4reg_t *sbbr_l;
865 volatile ioc4reg_t *sbbr_h;
866
867 if(port->ip_hooks == &hooks_array[0]) {
868 sbbr_l = &port->ip_ioc4->sbbr01_l;
869 sbbr_h = &port->ip_ioc4->sbbr01_h;
870 }
871 else {
872 sbbr_l = &port->ip_ioc4->sbbr23_l;
873 sbbr_h = &port->ip_ioc4->sbbr23_h;
874 }
875
876 /* Set the DMA address */
877 ring_pci_addr = ring_dmatrans(port->ip_conn_vhdl,
878 port->ip_ring_buf_k0);
879
880 PCI_OUTW(sbbr_h,
881 (ioc4reg_t) ((__psunsigned_t) ring_pci_addr >> 32));
882
883 PCI_OUTW(sbbr_l,
884 ((ioc4reg_t) (int64_t) ring_pci_addr | IOC4_BUF_SIZE_BIT));
885
886 #ifdef IOC4_SIO_DEBUG
887 {
888 unsigned int tmp1, tmp2;
889
890 tmp1 = PCI_INW(sbbr_l);
891 tmp2 = PCI_INW(sbbr_h);
892 printk("========== %s : sbbr_l [%p]/0x%x sbbr_h [%p]/0x%x\n",
893 __FUNCTION__, (void *)sbbr_l, tmp1, (void *)sbbr_h, tmp2);
894 }
895 #endif
896 }
897
898 /* Set the receive timeout value to 10 msec */
899 PCI_OUTW(&port->ip_serial->srtr, IOC4_SRTR_HZ / 100);
900
901 /* Set RX threshold, enable DMA */
902 /* Set high water mark at 3/4 of full ring */
903 port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
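/* With 4 KB rings that threshold is 384 entries; since each 8-byte entry
 * carries up to 4 data bytes, RX_HIGH asserts once the producer runs 384
 * entries (up to ~1536 received chars) ahead of the consumer.
 */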
904
905 PCI_OUTW(&port->ip_serial->sscr, port->ip_sscr);
906
907 /* Disable and clear all serial related interrupt bits */
908 IOC4_WRITE_IEC(port->ip_ioc4_soft, H_INTR_CLEAR, ioc4_sio_intr_type);
909 port->ip_ienb &= ~H_INTR_CLEAR;
910 PCI_OUTW(&port->ip_ioc4->sio_ir, H_INTR_CLEAR);
911
912 return(0);
913 }
914
915
916 /*
917 * Device initialization.
918 * Called at *_attach() time for each
919 * IOC4 with serial ports in the system.
920 * If vhdl is GRAPH_VERTEX_NONE, do not do
921 * any graph related work; otherwise, it
922 * is the IOC4 vertex that should be used
923 * for requesting pciio services.
924 */
925 int
926 ioc4_serial_attach(vertex_hdl_t conn_vhdl, void *ioc4)
927 {
928 /*REFERENCED*/
929 graph_error_t rc;
930 ioc4_mem_t *ioc4_mem;
931 vertex_hdl_t port_vhdl, ioc4_vhdl;
932 vertex_hdl_t intr_dev_vhdl;
933 ioc4port_t *port;
934 ioc4port_t *ports[4];
935 static char *names[] = { "tty/1", "tty/2", "tty/3", "tty/4" };
936 int x, first_port = -1, last_port = -1;
937 void *ioc4_soft;
938 unsigned int ioc4_revid_min = 62;
939 unsigned int ioc4_revid;
940
941
942 /* IOC4 firmware must be at least rev 62 */
943 ioc4_revid = pciio_config_get(conn_vhdl, PCI_CFG_REV_ID, 1);
944
945 if (ioc4_revid < ioc4_revid_min) {
946 printk( "IOC4 serial ports not supported on firmware rev %d, please upgrade to rev %d or higher\n", ioc4_revid, ioc4_revid_min);
947 return -1;
948 }
949
950 first_port = 0;
951 last_port = 3;
952
953 /* Get back pointer to the ioc4 soft area */
954 rc = hwgraph_traverse(conn_vhdl, EDGE_LBL_IOC4, &ioc4_vhdl);
955 ASSERT(rc == GRAPH_SUCCESS);
956 ioc4_soft = (void *)hwgraph_fastinfo_get(ioc4_vhdl);
957
958 /* grab the PIO address */
959 ioc4_mem = (ioc4_mem_t *)ioc4;
960 ASSERT(ioc4_mem != NULL);
961
962 /*
963 * Create port structures for each port
964 */
965 NEWA(port, 4);
966 #ifdef IOC4_SIO_DEBUG
967 printk("%s : [addr 0x%p]\n", __FUNCTION__, (void *)port);
968 #endif
969 ports[0] = port++;
970 ports[1] = port++;
971 ports[2] = port++;
972 ports[3] = port++;
973
974 #if DEBUG
975 {
976 int slot = atomicAddInt(&next_saveport, 4) - 4;
977 saveport[slot] = ports[0];
978 saveport[slot + 1] = ports[1];
979 saveport[slot + 2] = ports[2];
980 saveport[slot + 3] = ports[3];
981 ASSERT(slot < MAXSAVEPORT);
982 }
983 #endif
984
985 #ifdef DEBUG
986 if ((caddr_t) port != (caddr_t) &(port->ip_sioport))
987 panic("sioport is not first member of ioc4port struct\n");
988 #endif
989
990 /* Allocate buffers and jumpstart the hardware.
991 */
992 for (x = first_port; x < (last_port + 1); x++) {
993
994 port = ports[x];
995 #ifdef IOC4_SIO_DEBUG
996 printk("%s : initialize port %d [addr 0x%p/0x%p]\n", __FUNCTION__, x, (void *)port,
997 (void *)GPORT(port));
998 #endif
999 port->ip_ioc4_soft = ioc4_soft;
1000 rc = hwgraph_path_add(conn_vhdl, names[x], &port_vhdl);
1001 ASSERT(rc == GRAPH_SUCCESS);
1002 port->ip_conn_vhdl = conn_vhdl;
1003 port->ip_port_vhdl = port_vhdl;
1004 port->ip_ienb = 0;
1005 hwgraph_fastinfo_set(port_vhdl, (arbitrary_info_t) port);
1006
1007 /* Perform upper layer initialization. Create all device node
1008 * types including rs422 ports.
1009 */
1010 ioc4_serial_initport(GPORT(port), x);
1011 port->ip_baud = 9600;
1012
1013 /* Attach the calldown hooks so upper layer can call our
1014 * routines.
1015 */
1016 port->ip_sioport.sio_calldown = &ioc4_calldown;
1017
1018 /* Map in the IOC4 register area */
1019 port->ip_ioc4 = ioc4_mem;
1020 }
1021
1022 {
1023 /* Port 0 */
1024 port = ports[0];
1025 port->ip_hooks = &hooks_array[0];
1026
1027 /* Get direct hooks to the serial regs and uart regs
1028 * for this port
1029 */
1030 port->ip_serial = &(port->ip_ioc4->port_0);
1031 port->ip_uart = &(port->ip_ioc4->uart_0);
1032 #ifdef IOC4_SIO_DEBUG
1033 printk("==== %s : serial port 0 address 0x%p uart address 0x%p\n",
1034 __FUNCTION__, (void *)port->ip_serial, (void *)port->ip_uart);
1035 #endif
1036
1037 /* If we don't already have a ring buffer,
1038 * set one up.
1039 */
1040 if (port->ip_ring_buf_k0 == 0) {
1041
1042 #if PAGE_SIZE >= TOTAL_RING_BUF_SIZE
1043 if ((port->ip_ring_buf_k0 = kvpalloc(1, VM_DIRECT, 0)) == 0)
1044 panic("ioc4_uart driver cannot allocate page\n");
1045 #else
1046 /* We need to allocate a chunk of memory on a
1047 * TOTAL_RING_BUF_SIZE boundary.
1048 */
1049 {
1050 pgno_t pfn;
1051 caddr_t vaddr;
1052 if ((pfn = contig_memalloc(TOTAL_RING_BUF_SIZE / PAGE_SIZE,
1053 TOTAL_RING_BUF_SIZE / PAGE_SIZE,
1054 VM_DIRECT)) == 0)
1055 panic("ioc4_uart driver cannot allocate page\n");
1056 ASSERT(small_pfn(pfn));
1057 vaddr = small_pfntova_K0(pfn);
1058 (void) COLOR_VALIDATION(pfdat + pfn,
1059 colorof(vaddr),
1060 0, VM_DIRECT);
1061 port->ip_ring_buf_k0 = vaddr;
1062 }
1063 #endif
1064 }
1065 ASSERT((((int64_t)port->ip_ring_buf_k0) &
1066 (TOTAL_RING_BUF_SIZE - 1)) == 0);
1067 memset(port->ip_ring_buf_k0, 0, TOTAL_RING_BUF_SIZE);
1068 port->ip_inring = RING(port, RX_0_OR_2);
1069 port->ip_outring = RING(port, TX_0_OR_2);
1070
1071 /* Initialize the hardware for IOC4 */
1072 hardware_init(port);
1073
1074 if (hwgraph_edge_get(ports[0]->ip_port_vhdl, "d", &intr_dev_vhdl) !=
1075 GRAPH_SUCCESS) {
1076 intr_dev_vhdl = ports[0]->ip_port_vhdl;
1077 }
1078
1079 /* Attach interrupt handlers */
1080 ioc4_intr_connect(conn_vhdl,
1081 ioc4_sio_intr_type,
1082 IOC4_SIO_IR_S0,
1083 ioc4_serial_intr,
1084 ports[0],
1085 ports[0]->ip_port_vhdl,
1086 intr_dev_vhdl);
1087
1088 ioc4_intr_connect(conn_vhdl,
1089 ioc4_other_intr_type,
1090 IOC4_OTHER_IR_S0_MEMERR,
1091 ioc4_dma_error_intr,
1092 ports[0],
1093 ports[0]->ip_port_vhdl,
1094 intr_dev_vhdl);
1095 }
1096
1097 {
1098
1099 /* Port 1 */
1100 port = ports[1];
1101 port->ip_hooks = &hooks_array[1];
1102
1103 port->ip_serial = &(port->ip_ioc4->port_1);
1104 port->ip_uart = &(port->ip_ioc4->uart_1);
1105 #ifdef IOC4_SIO_DEBUG
1106 printk("==== %s : serial port 1 address 0x%p uart address 0x%p\n",
1107 __FUNCTION__, (void *)port->ip_serial, (void *)port->ip_uart);
1108 #endif
1109
1110 port->ip_ring_buf_k0 = ports[0]->ip_ring_buf_k0;
1111 port->ip_inring = RING(port, RX_1_OR_3);
1112 port->ip_outring = RING(port, TX_1_OR_3);
1113
1114 /* Initialize the hardware for IOC4 */
1115 hardware_init(port);
1116
1117 if (hwgraph_edge_get(ports[1]->ip_port_vhdl, "d", &intr_dev_vhdl) !=
1118 GRAPH_SUCCESS) {
1119 intr_dev_vhdl = ports[1]->ip_port_vhdl;
1120 }
1121
1122 /* Attach interrupt handler */
1123 ioc4_intr_connect(conn_vhdl,
1124 ioc4_sio_intr_type,
1125 IOC4_SIO_IR_S1,
1126 ioc4_serial_intr,
1127 ports[1],
1128 ports[1]->ip_port_vhdl,
1129 intr_dev_vhdl);
1130
1131 ioc4_intr_connect(conn_vhdl,
1132 ioc4_other_intr_type,
1133 IOC4_OTHER_IR_S1_MEMERR,
1134 ioc4_dma_error_intr,
1135 ports[1],
1136 ports[1]->ip_port_vhdl,
1137 intr_dev_vhdl);
1138 }
1139
1140 {
1141
1142 /* Port 2 */
1143 port = ports[2];
1144 port->ip_hooks = &hooks_array[2];
1145
1146 /* Get direct hooks to the serial regs and uart regs
1147 * for this port
1148 */
1149 port->ip_serial = &(port->ip_ioc4->port_2);
1150 port->ip_uart = &(port->ip_ioc4->uart_2);
1151 #ifdef IOC4_SIO_DEBUG
1152 printk("==== %s : serial port 2 address 0x%p uart address 0x%p\n",
1153 __FUNCTION__, (void *)port->ip_serial, (void *)port->ip_uart);
1154 #endif
1155
1156 /* If we don't already have a ring buffer,
1157 * set one up.
1158 */
1159 if (port->ip_ring_buf_k0 == 0) {
1160
1161 #if PAGE_SIZE >= TOTAL_RING_BUF_SIZE
1162 if ((port->ip_ring_buf_k0 = kvpalloc(1, VM_DIRECT, 0)) == 0)
1163 panic("ioc4_uart driver cannot allocate page\n");
1164 #else
1165
1166 /* We need to allocate a chunk of memory on a
1167 * TOTAL_RING_BUF_SIZE boundary.
1168 */
1169 {
1170 pgno_t pfn;
1171 caddr_t vaddr;
1172 if ((pfn = contig_memalloc(TOTAL_RING_BUF_SIZE / PAGE_SIZE,
1173 TOTAL_RING_BUF_SIZE / PAGE_SIZE,
1174 VM_DIRECT)) == 0)
1175 panic("ioc4_uart driver cannot allocate page\n");
1176 ASSERT(small_pfn(pfn));
1177 vaddr = small_pfntova_K0(pfn);
1178 (void) COLOR_VALIDATION(pfdat + pfn,
1179 colorof(vaddr),
1180 0, VM_DIRECT);
1181 port->ip_ring_buf_k0 = vaddr;
1182 }
1183 #endif
1184
1185 }
1186 ASSERT((((int64_t)port->ip_ring_buf_k0) &
1187 (TOTAL_RING_BUF_SIZE - 1)) == 0);
1188 memset(port->ip_ring_buf_k0, 0, TOTAL_RING_BUF_SIZE);
1189 port->ip_inring = RING(port, RX_0_OR_2);
1190 port->ip_outring = RING(port, TX_0_OR_2);
1191
1192 /* Initialize the hardware for IOC4 */
1193 hardware_init(port);
1194
1195 if (hwgraph_edge_get(ports[2]->ip_port_vhdl, "d", &intr_dev_vhdl) !=
1196 GRAPH_SUCCESS) {
1197 intr_dev_vhdl = ports[2]->ip_port_vhdl;
1198 }
1199
1200 /* Attach interrupt handler */
1201 ioc4_intr_connect(conn_vhdl,
1202 ioc4_sio_intr_type,
1203 IOC4_SIO_IR_S2,
1204 ioc4_serial_intr,
1205 ports[2],
1206 ports[2]->ip_port_vhdl,
1207 intr_dev_vhdl);
1208
1209 ioc4_intr_connect(conn_vhdl,
1210 ioc4_other_intr_type,
1211 IOC4_OTHER_IR_S2_MEMERR,
1212 ioc4_dma_error_intr,
1213 ports[2],
1214 ports[2]->ip_port_vhdl,
1215 intr_dev_vhdl);
1216 }
1217
1218 {
1219
1220 /* Port 3 */
1221 port = ports[3];
1222 port->ip_hooks = &hooks_array[3];
1223
1224 port->ip_serial = &(port->ip_ioc4->port_3);
1225 port->ip_uart = &(port->ip_ioc4->uart_3);
1226 #ifdef IOC4_SIO_DEBUG
1227 printk("==== %s : serial port 3 address 0x%p uart address 0x%p\n",
1228 __FUNCTION__, (void *)port->ip_serial, (void *)port->ip_uart);
1229 #endif
1230
1231 port->ip_ring_buf_k0 = ports[2]->ip_ring_buf_k0;
1232 port->ip_inring = RING(port, RX_1_OR_3);
1233 port->ip_outring = RING(port, TX_1_OR_3);
1234
1235 /* Initialize the hardware for IOC4 */
1236 hardware_init(port);
1237
1238 if (hwgraph_edge_get(ports[3]->ip_port_vhdl, "d", &intr_dev_vhdl) !=
1239 GRAPH_SUCCESS) {
1240 intr_dev_vhdl = ports[3]->ip_port_vhdl;
1241 }
1242
1243 /* Attach interrupt handler */
1244 ioc4_intr_connect(conn_vhdl,
1245 ioc4_sio_intr_type,
1246 IOC4_SIO_IR_S3,
1247 ioc4_serial_intr,
1248 ports[3],
1249 ports[3]->ip_port_vhdl,
1250 intr_dev_vhdl);
1251
1252 ioc4_intr_connect(conn_vhdl,
1253 ioc4_other_intr_type,
1254 IOC4_OTHER_IR_S3_MEMERR,
1255 ioc4_dma_error_intr,
1256 ports[3],
1257 ports[3]->ip_port_vhdl,
1258 intr_dev_vhdl);
1259 }
1260
1261 #ifdef DEBUG
1262 idbg_addfunc( "ioc4dump", idbg_ioc4dump );
1263 #endif
1264
1265 return 0;
1266 }
1267
1268
1269 /* Shut down an IOC4 */
1270 /* ARGSUSED1 */
1271 void
1272 ioc4_serial_kill(ioc4port_t *port)
1273 {
1274 DEBUGINC(killed, 1);
1275
1276 /* Notify upper layer that this port is no longer usable */
1277 UP_DETACH(GPORT(port));
1278
1279 /* Clear everything in the sscr */
1280 PCI_OUTW(&port->ip_serial->sscr, 0);
1281 port->ip_sscr = 0;
1282
1283 #ifdef DEBUG
1284 /* Make sure nobody gets past the lock and accesses the hardware */
1285 port->ip_ioc4 = 0;
1286 port->ip_serial = 0;
1287 #endif
1288
1289 }
1290
1291
1292 /*
1293 * Open a port
1294 */
1295 static int
1296 ioc4_open(sioport_t *port)
1297 {
1298 ioc4port_t *p = LPORT(port);
1299 int spin_success;
1300
1301 #ifdef NOT_YET
1302 ASSERT(L_LOCKED(port, L_OPEN));
1303 #endif
1304
1305 p->ip_flags = 0;
1306 p->ip_modem_bits = 0;
1307
1308 /* Pause the DMA interface if necessary */
1309 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1310 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
1311 SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
1312 spin_success);
1313 if (!spin_success) {
1314 NOT_PROGRESS();
1315 return(-1);
1316 }
1317 }
1318
1319 /* Reset the input fifo. If the uart received chars while the port
1320 * was closed and DMA is not enabled, the uart may have a bunch of
1321 * chars hanging around in its RX fifo which will not be discarded
1322 * by rclr in the upper layer. We must get rid of them here.
1323 */
1324 PCI_OUTB(&p->ip_uart->i4u_fcr, FCR_FIFOEN | FCR_RxFIFO);
1325
1326 /* Set defaults */
1327 SET_BAUD(p, 9600);
1328
1329 PCI_OUTB(&p->ip_uart->i4u_lcr, LCR_BITS8 | LCR_1_STOP_BITS);
1330
1331 /* Re-enable DMA, set default threshold to intr whenever there is
1332 * data available.
1333 */
1334 p->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
1335 p->ip_sscr |= 1; /* default threshold */
1336
1337 /* Plug in the new sscr. This implicitly clears the DMA_PAUSE
1338 * flag if it was set above
1339 */
1340 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1341
1342 PCI_OUTW(&p->ip_serial->srtr, 0);
1343
1344 p->ip_tx_lowat = 1;
1345
1346 dprintf(("ioc4 open successful\n"));
1347
1348 return(0);
1349 }
1350
1351
1352 /*
1353 * Config hardware
1354 */
1355 static int
1356 ioc4_config(sioport_t *port,
1357 int baud,
1358 int byte_size,
1359 int stop_bits,
1360 int parenb,
1361 int parodd)
1362 {
1363 ioc4port_t *p = LPORT(port);
1364 char lcr, sizebits;
1365 int spin_success;
1366
1367 #ifdef NOT_YET
1368 ASSERT(L_LOCKED(port, L_CONFIG));
1369 #endif
1370
1371 if (SET_BAUD(p, baud))
1372 return(1);
1373
1374 switch(byte_size) {
1375 case 5:
1376 sizebits = LCR_BITS5;
1377 break;
1378 case 6:
1379 sizebits = LCR_BITS6;
1380 break;
1381 case 7:
1382 sizebits = LCR_BITS7;
1383 break;
1384 case 8:
1385 sizebits = LCR_BITS8;
1386 break;
1387 default:
1388 dprintf(("invalid byte size port 0x%x size %d\n", port, byte_size));
1389 return(1);
1390 }
1391
1392 /* Pause the DMA interface if necessary */
1393 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1394 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
1395 SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
1396 spin_success);
1397 if (!spin_success)
1398 return(-1);
1399 }
1400
1401 /* Clear relevant fields in lcr */
1402 lcr = PCI_INB(&p->ip_uart->i4u_lcr);
1403 lcr &= ~(LCR_MASK_BITS_CHAR | LCR_EPS |
1404 LCR_PEN | LCR_MASK_STOP_BITS);
1405
1406 /* Set byte size in lcr */
1407 lcr |= sizebits;
1408
1409 /* Set parity */
1410 if (parenb) {
1411 lcr |= LCR_PEN;
1412 if (!parodd)
1413 lcr |= LCR_EPS;
1414 }
1415
1416 /* Set stop bits */
1417 if (stop_bits)
1418 lcr |= LCR_2_STOP_BITS;
1419
1420 PCI_OUTB(&p->ip_uart->i4u_lcr, lcr);
1421
1422 dprintf(("ioc4_config: lcr bits 0x%x\n", lcr));
1423
1424 /* Re-enable the DMA interface if necessary */
1425 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1426 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1427 }
1428
1429 p->ip_baud = baud;
1430
1431 /* When we get within this number of ring entries of filling the
1432 * entire ring on TX, place an EXPLICIT intr to generate a lowat
1433 * notification when output has drained.
1434 */
1435 p->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
1436 if (p->ip_tx_lowat == 0)
1437 p->ip_tx_lowat = 1;
1438
1439 ioc4_rx_timeout(port, p->ip_rx_timeout);
1440
1441 return(0);
1442 }
1443
1444
1445 /*
1446 * Enable hardware flow control
1447 */
1448 static int
1449 ioc4_enable_hfc(sioport_t *port, int enable)
1450 {
1451 ioc4port_t *p = LPORT(port);
1452
1453 #ifdef NOT_YET
1454 ASSERT(L_LOCKED(port, L_ENABLE_HFC));
1455 #endif
1456
1457 dprintf(("enable hfc port 0x%p, enb %d\n", (void *)port, enable));
1458
1459 if (enable)
1460 p->ip_sscr |= IOC4_SSCR_HFC_EN;
1461 else
1462 p->ip_sscr &= ~IOC4_SSCR_HFC_EN;
1463
1464 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1465
1466 return(0);
1467 }
1468
1469
1470 /*
1471 * Set external clock
1472 */
1473 /*ARGSUSED*/
1474 static int
1475 ioc4_set_extclk(sioport_t *port, int clock_factor)
1476 {
1477 #ifdef NOT_YET
1478 ASSERT(L_LOCKED(port, L_SET_EXTCLK));
1479 /* XXX still todo */
1480 #endif
1481
1482 /* only support 0 (no external clock) */
1483 return(clock_factor);
1484 }
1485
1486
1487 /*
1488 * Write bytes to the hardware. Returns the number of bytes
1489 * actually written.
1490 */
1491 static int
1492 do_ioc4_write(sioport_t *port, char *buf, int len)
1493 {
1494 int prod_ptr, cons_ptr, total;
1495 struct ring *outring;
1496 struct ring_entry *entry;
1497 ioc4port_t *p = LPORT(port);
1498 struct hooks *hooks = p->ip_hooks;
1499
1500 DEBUGINC(write_bytes, len);
1501 DEBUGINC(write_cnt, 1);
1502
1503 dprintf(("write port 0x%p, len %d\n", (void *)port, len));
1504
1505 ASSERT(len >= 0);
1506
1507 prod_ptr = p->ip_tx_prod;
1508 cons_ptr = PCI_INW(&p->ip_serial->stcir) & PROD_CONS_MASK;
1509 outring = p->ip_outring;
1510
1511 /* Maintain a 1-entry red-zone. The ring buffer is full when
1512 * (cons - prod) % ring_size is 1. Rather than do this subtraction
1513 * in the body of the loop, I'll do it now.
1514 */
1515 cons_ptr = (cons_ptr - (int) sizeof(struct ring_entry)) & PROD_CONS_MASK;
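/* Example with the default 4 KB ring: 512 eight-byte entries, so at most
 * 511 entries (up to 2044 data bytes) may be queued at once; the one-entry
 * red-zone keeps prod == cons unambiguous as "ring empty".
 */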
1516
1517 total = 0;
1518 /* Stuff the bytes into the output */
1519 while ((prod_ptr != cons_ptr) && (len > 0)) {
1520 int x;
1521
1522 /* Go 4 bytes (one ring entry) at a time */
1523 entry = (struct ring_entry*) ((caddr_t)outring + prod_ptr);
1524
1525 /* Invalidate all entries */
1526 entry->ring_allsc = 0;
1527
1528 /* Copy in some bytes */
1529 for(x = 0; (x < 4) && (len > 0); x++) {
1530 entry->ring_data[x] = *buf++;
1531 entry->ring_sc[x] = IOC4_TXCB_VALID;
1532 len--;
1533 total++;
1534 }
1535
1536 DEBUGINC(tx_buf_used, x);
1537 DEBUGINC(tx_buf_cnt, 1);
1538
1539 /* If we are within some small threshold of filling up the entire
1540 * ring buffer, we must place an EXPLICIT intr here to generate
1541 * a lowat interrupt in case we subsequently really do fill up
1542 * the ring and the caller goes to sleep. No need to place
1543 * more than one though.
1544 */
1545 if (!(p->ip_flags & LOWAT_WRITTEN) &&
1546 ((cons_ptr - prod_ptr) & PROD_CONS_MASK) <=
1547 p->ip_tx_lowat * (int)sizeof(struct ring_entry)) {
1548 p->ip_flags |= LOWAT_WRITTEN;
1549 entry->ring_sc[0] |= IOC4_TXCB_INT_WHEN_DONE;
1550 dprintf(("write placing TX_EXPLICIT\n"));
1551 }
1552
1553 /* Go on to next entry */
1554 prod_ptr = (prod_ptr + (int) sizeof(struct ring_entry)) & PROD_CONS_MASK;
1555 }
1556
1557 /* If we sent something, start DMA if necessary */
1558 if (total > 0 && !(p->ip_sscr & IOC4_SSCR_DMA_EN)) {
1559 p->ip_sscr |= IOC4_SSCR_DMA_EN;
1560 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1561 }
1562
1563 /* Store the new producer pointer. If TX is disabled, we stuff the
1564 * data into the ring buffer, but we don't actually start TX.
1565 */
1566 if (!(p->ip_flags & TX_DISABLED)) {
1567 PCI_OUTW(&p->ip_serial->stpir, prod_ptr);
1568
1569 /* If we are now transmitting, enable TX_MT interrupt so we
1570 * can disable DMA if necessary when the TX finishes.
1571 */
1572 if (total > 0)
1573 enable_intrs(p, H_INTR_TX_MT);
1574 }
1575 p->ip_tx_prod = prod_ptr;
1576
1577 dprintf(("write port 0x%p, wrote %d\n", (void *)port, total));
1578 DEBUGINC(wrote_bytes, total);
1579 return(total);
1580 }
1581
1582
1583 /* Asynchronous write */
1584 static int
1585 ioc4_write(sioport_t *port, char *buf, int len)
1586 {
1587 #ifdef NOT_YET
1588 ASSERT(L_LOCKED(port, L_WRITE));
1589 #endif
1590 return(do_ioc4_write(port, buf, len));
1591 }
1592
1593
1594 /* Synchronous write */
1595 static int
1596 ioc4_sync_write(sioport_t *port, char *buf, int len)
1597 {
1598 int bytes;
1599
1600 ASSERT(sio_port_islocked(port));
1601 bytes = do_ioc4_write(port, buf, len);
1602
1603 /* Don't allow the system to hang if XOFF is in force */
1604 if (len > 0 && bytes == 0 && (LPORT(port)->ip_flags & TX_DISABLED))
1605 ioc4_enable_tx(port, 1);
1606
1607 return(bytes);
1608 }
1609
1610
1611 /* Write flush */
1612 static void
1613 ioc4_wrflush(sioport_t *port)
1614 {
1615 ioc4port_t *p = LPORT(port);
1616
1617 ASSERT(sio_port_islocked(port));
1618
1619 /* We can't flush if TX is disabled due to XOFF. */
1620 if (!(PCI_INW(&p->ip_ioc4->sio_ir) & IOC4_SIO_IR_S0_TX_MT) &&
1621 (p->ip_flags & TX_DISABLED))
1622 ioc4_enable_tx(port, 1);
1623
1624 /* Spin waiting for TX_MT to assert only if DMA is enabled. If we
1625 * are panicking and one of the other processors is already in
1626 * symmon, DMA will be disabled and TX_MT will never be asserted.
1627 * There may also be legitimate cases in the kernel where DMA is
1628 * disabled and we won't flush correctly here.
1629 */
1630
1631 while ((PCI_INW(&p->ip_serial->sscr) & (IOC4_SSCR_DMA_EN |
1632 IOC4_SSCR_PAUSE_STATE)) == IOC4_SSCR_DMA_EN &&
1633 !(PCI_INW(&p->ip_ioc4->sio_ir) & IOC4_SIO_IR_S0_TX_MT)) {
1634 udelay(5);
1635 }
1636 }
1637
1638
1639 /*
1640 * Set or clear break condition on output
1641 */
1642 static int
1643 ioc4_break(sioport_t *port, int brk)
1644 {
1645 ioc4port_t *p = LPORT(port);
1646 char lcr;
1647 int spin_success;
1648
1649 #ifdef NOT_YET
1650 ASSERT(L_LOCKED(port, L_BREAK));
1651 #endif
1652
1653 /* Pause the DMA interface if necessary */
1654 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1655 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
1656 SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
1657 spin_success);
1658 if (!spin_success)
1659 return(-1);
1660 }
1661
1662 lcr = PCI_INB(&p->ip_uart->i4u_lcr);
1663 if (brk) {
1664 /* Set break */
1665 PCI_OUTB(&p->ip_uart->i4u_lcr, lcr | LCR_SNDBRK);
1666 }
1667 else {
1668 /* Clear break */
1669 PCI_OUTB(&p->ip_uart->i4u_lcr, lcr & ~LCR_SNDBRK);
1670 }
1671
1672 /* Re-enable the DMA interface if necessary */
1673 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1674 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1675 }
1676
1677 dprintf(("break port 0x%p, brk %d\n", (void *)port, brk));
1678
1679 return(0);
1680 }
1681
1682
1683 static int
1684 ioc4_enable_tx(sioport_t *port, int enb)
1685 {
1686 ioc4port_t *p = LPORT(port);
1687 struct hooks *hooks = p->ip_hooks;
1688 int spin_success;
1689
1690 #ifdef NOT_YET
1691 ASSERT(L_LOCKED(port, L_ENABLE_TX));
1692 #endif
1693
1694 /* If we're already in the desired state, we're done */
1695 if ((enb && !(p->ip_flags & TX_DISABLED)) ||
1696 (!enb && (p->ip_flags & TX_DISABLED)))
1697 return(0);
1698
1699 /* Pause DMA */
1700 if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
1701 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
1702 SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
1703 spin_success);
1704 if (!spin_success)
1705 return(-1);
1706 }
1707
1708 if (enb) {
1709 p->ip_flags &= ~TX_DISABLED;
1710 PCI_OUTW(&p->ip_serial->stpir, p->ip_tx_prod);
1711 enable_intrs(p, H_INTR_TX_MT);
1712 }
1713 else {
1714 ioc4reg_t txcons = PCI_INW(&p->ip_serial->stcir) & PROD_CONS_MASK;
1715 p->ip_flags |= TX_DISABLED;
1716 disable_intrs(p, H_INTR_TX_MT);
1717
1718 /* Only move the transmit producer pointer back if the
1719 * transmitter is not already empty, otherwise we'll be
1720 * generating a bogus entry.
1721 */
1722 if (txcons != p->ip_tx_prod)
1723 PCI_OUTW(&p->ip_serial->stpir,
1724 (txcons + (int) sizeof(struct ring_entry)) & PROD_CONS_MASK);
1725 }
1726
1727 /* Re-enable the DMA interface if necessary */
1728 if (p->ip_sscr & IOC4_SSCR_DMA_EN)
1729 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1730
1731 return(0);
1732 }
1733
1734
1735 /*
1736 * Read in bytes from the hardware. Return the number of bytes
1737 * actually read.
1738 */
1739 static int
1740 ioc4_read(sioport_t *port, char *buf, int len)
1741 {
1742 int prod_ptr, cons_ptr, total, x, spin_success;
1743 struct ring *inring;
1744 ioc4port_t *p = LPORT(port);
1745 struct hooks *hooks = p->ip_hooks;
1746
1747 #ifdef NOT_YET
1748 ASSERT(L_LOCKED(port, L_READ));
1749 #endif
1750
1751 dprintf(("read port 0x%p, len %d\n", (void *)port, len));
1752
1753 DEBUGINC(read_bytes, len);
1754 DEBUGINC(read_cnt, 1);
1755
1756 ASSERT(len >= 0);
1757
1758 /* There is a nasty timing issue in the IOC4. When the RX_TIMER
1759 * expires or the RX_HIGH condition arises, we take an interrupt.
1760 * At some point while servicing the interrupt, we read bytes from
1761 * the ring buffer and re-arm the RX_TIMER. However the RX_TIMER is
1762 * not started until the first byte is received *after* it is armed,
1763 * and any bytes pending in the RX construction buffers are not drained
1764 * to memory until either there are 4 bytes available or the RX_TIMER
1765 * expires. This leads to a potential situation where data is left
1766 * in the construction buffers forever because 1 to 3 bytes were received
1767 * after the interrupt was generated but before the RX_TIMER was re-armed.
1768 * At that point as long as no subsequent bytes are received the
1769 * timer will never be started and the bytes will remain in the
1770 * construction buffer forever. The solution is to execute a DRAIN
1771 * command after rearming the timer. This way any bytes received before
1772 * the DRAIN will be drained to memory, and any bytes received after
1773 * the DRAIN will start the TIMER and be drained when it expires.
1774 * Luckily, this only needs to be done when the DMA buffer is empty
1775 * since there is no requirement that this function return all
1776 * available data as long as it returns some.
1777 */
1778 /* Re-arm the timer */
1779 PCI_OUTW(&p->ip_serial->srcir, p->ip_rx_cons | IOC4_SRCIR_ARM);
1780
1781 prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;
1782 cons_ptr = p->ip_rx_cons;
1783
1784 if (prod_ptr == cons_ptr) {
1785 int reset_dma = 0;
1786
1787 /* Input buffer appears empty, do a flush. */
1788
1789 /* DMA must be enabled for this to work. */
1790 if (!(p->ip_sscr & IOC4_SSCR_DMA_EN)) {
1791 p->ip_sscr |= IOC4_SSCR_DMA_EN;
1792 reset_dma = 1;
1793 }
1794
1795 /* Potential race condition: we must reload the srpir after
1796 * issuing the drain command, otherwise we could think the RX
1797 * buffer is empty, then take a very long interrupt, and when
1798 * we come back it's full and we wait forever for the drain to
1799 * complete.
1800 */
1801 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_RX_DRAIN);
1802 prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;
1803
1804 DEBUGINC(drain, 1);
1805
1806 /* We must not wait for the DRAIN to complete unless there are
1807 * at least 8 bytes (2 ring entries) available to receive the data
1808 * otherwise the DRAIN will never complete and we'll deadlock here.
1809 * In fact, to make things easier, I'll just ignore the flush if
1810 * there is any data at all now available.
1811 */
1812 if (prod_ptr == cons_ptr) {
1813 DEBUGINC(drainwait, 1);
1814 SPIN(PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_RX_DRAIN, spin_success);
1815 if (!spin_success)
1816 return(-1);
1817
1818 /* SIGH. We have to reload the prod_ptr *again* since
1819 * the drain may have caused it to change
1820 */
1821 prod_ptr = PCI_INW(&p->ip_serial->srpir) & PROD_CONS_MASK;
1822 }
1823
1824 if (reset_dma) {
1825 DEBUGINC(resetdma, 1);
1826 p->ip_sscr &= ~IOC4_SSCR_DMA_EN;
1827 PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
1828 }
1829 }
1830 inring = p->ip_inring;
1831
1832 p->ip_flags &= ~READ_ABORTED;
1833
1834 total = 0;
1835 /* Grab bytes from the hardware */
1836 while(prod_ptr != cons_ptr && len > 0) {
1837 struct ring_entry *entry;
1838
1839 entry = (struct ring_entry *) ((caddr_t)inring + cons_ptr);
1840
1841 /* According to the producer pointer, this ring entry
1842 * must contain some data. But if the PIO happened faster
1843 * than the DMA, the data may not be available yet, so let's
1844 * wait until it arrives.
1845 */
1846 if ((((volatile struct ring_entry *) entry)->ring_allsc &
1847 RING_ANY_VALID) == 0) {
1848
1849 /* Indicate the read is aborted so we don't disable
1850 * the interrupt thinking that the consumer is
1851 * congested.
1852 */
1853 p->ip_flags |= READ_ABORTED;
1854
1855 DEBUGINC(read_aborted, 1);
1856 len = 0;
1857 break;
1858
1859 }
1860
        /* Load the bytes/status out of the ring entry */
        for (x = 0; x < 4 && len > 0; x++) {
            char *sc = &(entry->ring_sc[x]);

            /* Check for change in modem state or overrun */
            if (*sc & IOC4_RXSB_MODEM_VALID) {
                if (p->ip_notify & N_DDCD) {

                    /* Notify upper layer if DCD dropped */
                    if ((p->ip_flags & DCD_ON) && !(*sc & IOC4_RXSB_DCD)) {

                        /* If we have already copied some data, return
                         * it. We'll pick up the carrier drop on the next
                         * pass. That way we don't throw away the data
                         * that has already been copied back to the caller's
                         * buffer.
                         */
                        if (total > 0) {
                            len = 0;
                            break;
                        }

                        p->ip_flags &= ~DCD_ON;

                        /* Turn off this notification so the carrier
                         * drop protocol won't see it again when it
                         * does a read.
                         */
                        *sc &= ~IOC4_RXSB_MODEM_VALID;

                        /* To keep things consistent, we need to update
                         * the consumer pointer so the next reader won't
                         * come in and try to read the same ring entries
                         * again. This must be done here before the call
                         * to UP_DDCD since UP_DDCD may do a recursive
                         * read!
                         */
                        if ((entry->ring_allsc & RING_ANY_VALID) == 0)
                            cons_ptr =
                                (cons_ptr + (int) sizeof(struct ring_entry)) &
                                PROD_CONS_MASK;

                        PCI_OUTW(&p->ip_serial->srcir, cons_ptr);
                        p->ip_rx_cons = cons_ptr;

                        /* Notify upper layer of carrier drop */
                        if (p->ip_notify & N_DDCD)
                            UP_DDCD(port, 0);

                        DEBUGINC(read_ddcd, 1);

                        /* If we had any data to return, we would have
                         * returned it above.
                         */
                        return(0);
                    }
                }

                /* Notify upper layer that an input overrun occurred */
                if ((*sc & IOC4_RXSB_OVERRUN) && (p->ip_notify & N_OVERRUN_ERROR)) {
                    DEBUGINC(rx_overrun, 1);
                    UP_NCS(port, NCS_OVERRUN);
                }

                /* Don't look at this byte again */
                *sc &= ~IOC4_RXSB_MODEM_VALID;
            }

            /* Check for valid data or RX errors */
            if (*sc & IOC4_RXSB_DATA_VALID) {
                if ((*sc & (IOC4_RXSB_PAR_ERR | IOC4_RXSB_FRAME_ERR |
                            IOC4_RXSB_BREAK)) &&
                    (p->ip_notify & (N_PARITY_ERROR | N_FRAMING_ERROR | N_BREAK))) {

                    /* There is an error condition on the next byte. If
                     * we have already transferred some bytes, we'll stop
                     * here. Otherwise if this is the first byte to be read,
                     * we'll just transfer it alone after notifying the
                     * upper layer of its status.
                     */
                    if (total > 0) {
                        len = 0;
                        break;
                    }
                    else {
                        if ((*sc & IOC4_RXSB_PAR_ERR) &&
                            (p->ip_notify & N_PARITY_ERROR)) {
                            DEBUGINC(parity, 1);
                            UP_NCS(port, NCS_PARITY);
                        }

                        if ((*sc & IOC4_RXSB_FRAME_ERR) &&
                            (p->ip_notify & N_FRAMING_ERROR)) {
                            DEBUGINC(framing, 1);
                            UP_NCS(port, NCS_FRAMING);
                        }

                        if ((*sc & IOC4_RXSB_BREAK) &&
                            (p->ip_notify & N_BREAK)) {
                            DEBUGINC(brk, 1);
                            UP_NCS(port, NCS_BREAK);
                        }
                        len = 1;
                    }
                }

                *sc &= ~IOC4_RXSB_DATA_VALID;
                *buf++ = entry->ring_data[x];
                len--;
                total++;
            }
        }

        DEBUGINC(rx_buf_used, x);
        DEBUGINC(rx_buf_cnt, 1);

        /* If we used up this entry entirely, go on to the next one,
         * otherwise we must have run out of buffer space, so
         * leave the consumer pointer here for the next read in case
         * there are still unread bytes in this entry.
         */
        if ((entry->ring_allsc & RING_ANY_VALID) == 0)
            cons_ptr = (cons_ptr + (int) sizeof(struct ring_entry)) &
                PROD_CONS_MASK;
    }

    /* Update consumer pointer and re-arm RX timer interrupt */
    PCI_OUTW(&p->ip_serial->srcir, cons_ptr);
    p->ip_rx_cons = cons_ptr;

    /* If we have now dipped below the RX high water mark and we have
     * RX_HIGH interrupt turned off, we can now turn it back on again.
     */
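    /* Occupancy here is (prod_ptr - cons_ptr) & PROD_CONS_MASK, in ring
     * bytes; the SSCR threshold field counts ring entries, so it is
     * shifted by IOC4_PROD_CONS_PTR_OFF to compare in the same units.
     */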
    if ((p->ip_flags & INPUT_HIGH) &&
        (((prod_ptr - cons_ptr) & PROD_CONS_MASK) <
         ((p->ip_sscr & IOC4_SSCR_RX_THRESHOLD) << IOC4_PROD_CONS_PTR_OFF))) {
        p->ip_flags &= ~INPUT_HIGH;
        enable_intrs(p, H_INTR_RX_HIGH);
    }

    DEBUGINC(red_bytes, total);

    return(total);
}


/*
 * Modify event notification
 */
static int
ioc4_notification(sioport_t *port, int mask, int on)
{
    ioc4port_t *p = LPORT(port);
    struct hooks *hooks = p->ip_hooks;
    ioc4reg_t intrbits, sscrbits;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_NOTIFICATION));
#endif
    ASSERT(mask);

    intrbits = sscrbits = 0;

    if (mask & N_DATA_READY)
        intrbits |= (H_INTR_RX_TIMER | H_INTR_RX_HIGH);
    if (mask & N_OUTPUT_LOWAT)
        intrbits |= H_INTR_TX_EXPLICIT;
    if (mask & N_DDCD) {
        intrbits |= H_INTR_DELTA_DCD;
        sscrbits |= IOC4_SSCR_RX_RING_DCD;
    }
    if (mask & N_DCTS)
        intrbits |= H_INTR_DELTA_CTS;

    if (on) {
        enable_intrs(p, intrbits);
        p->ip_notify |= mask;
        p->ip_sscr |= sscrbits;
    }
    else {
        disable_intrs(p, intrbits);
        p->ip_notify &= ~mask;
        p->ip_sscr &= ~sscrbits;
    }

    /* We require DMA if either DATA_READY or DDCD notification is
     * currently requested. If neither of these is requested and
     * there is currently no TX in progress, DMA may be disabled.
     */
    if (p->ip_notify & (N_DATA_READY | N_DDCD))
        p->ip_sscr |= IOC4_SSCR_DMA_EN;
    else if (!(p->ip_ienb & H_INTR_TX_MT))
        p->ip_sscr &= ~IOC4_SSCR_DMA_EN;

    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
    return(0);
}
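/* Usage sketch (illustrative): an upper layer that wants carrier-detect
 * transitions would call ioc4_notification(port, N_DDCD, 1), which enables
 * the delta-DCD interrupt, tells the hardware to post DCD state into the
 * RX ring (IOC4_SSCR_RX_RING_DCD), and therefore forces DMA on.
 */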


/*
 * Set RX timeout and threshold values. The upper layer passes in a
 * timeout value. In all cases it would like to be notified at least this
 * often when there are RX chars coming in. We set the RX timeout and
 * RX threshold (based on baud) to ensure that the upper layer is called
 * at roughly this interval during normal RX.
 * The input timeout value is in ticks.
 */
static int
ioc4_rx_timeout(sioport_t *port, int timeout)
{
    int threshold;
    ioc4port_t *p = LPORT(port);

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_RX_TIMEOUT));
#endif

    p->ip_rx_timeout = timeout;

    /* Timeout is in ticks. Let's figure out how many chars we
     * can receive at the current baud rate in that interval
     * and set the RX threshold to that amount. There are 4 chars
     * per ring entry, so we'll divide the number of chars that will
     * arrive in timeout by 4.
     */
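    /* Worked example (illustrative figures): at 9600 baud with HZ == 100
     * and a 2-tick timeout, about 9600/10 == 960 chars arrive per second,
     * so roughly 19 chars in 2/100 sec, giving a threshold of 19/4 == 4
     * ring entries.
     */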
    threshold = timeout * p->ip_baud / 10 / HZ / 4;
    if (threshold == 0)
        threshold = 1; /* otherwise we'll intr all the time! */

    if ((unsigned) threshold > (unsigned) IOC4_SSCR_RX_THRESHOLD)
        return(1);

    p->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
    p->ip_sscr |= threshold;

    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    /* Now set the RX timeout to the given value */
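    /* The SRTR counts in its own clock domain (IOC4_SRTR_HZ), so convert
     * from scheduler ticks and saturate at the counter maximum.
     */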
    timeout = timeout * IOC4_SRTR_HZ / HZ;
    if (timeout > IOC4_SRTR_CNT)
        timeout = IOC4_SRTR_CNT;

    PCI_OUTW(&p->ip_serial->srtr, timeout);

    return(0);
}


static int
set_DTRRTS(sioport_t *port, int val, int mask1, int mask2)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;
    int spin_success;
    char mcr;

    /* XXX need lock for pretty much this entire routine. Makes
     * me nervous to hold it for so long. If we crash or hit
     * a breakpoint in here, we're hosed.
     */

    /* Pause the DMA interface if necessary */
    if (p->ip_sscr & IOC4_SSCR_DMA_EN) {
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr | IOC4_SSCR_DMA_PAUSE);
        SPIN((PCI_INW(&p->ip_serial->sscr) & IOC4_SSCR_PAUSE_STATE) == 0,
             spin_success);
        if (!spin_success)
            return(-1);
    }
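    /* With the DMA engine quiesced, the shadow register and the UART
     * modem control register can be safely read-modify-written
     * (presumably the hardware maintains the shadow while DMA runs).
     */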

    shadow = PCI_INW(&p->ip_serial->shadow);
    mcr = (shadow & 0xff000000) >> 24;

    /* Set new value */
    if (val) {
        mcr |= mask1;
        shadow |= mask2;
    }
    else {
        mcr &= ~mask1;
        shadow &= ~mask2;
    }

    PCI_OUTB(&p->ip_uart->i4u_mcr, mcr);

    PCI_OUTW(&p->ip_serial->shadow, shadow);

    /* Re-enable the DMA interface if necessary */
    if (p->ip_sscr & IOC4_SSCR_DMA_EN)
        PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);

    return(0);
}
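/* mask1 selects the bit in the UART modem control register, mask2 the
 * matching bit in the IOC4 shadow register; the DTR/RTS wrappers below
 * pair them up.
 */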


static int
ioc4_set_DTR(sioport_t *port, int dtr)
{
#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_DTR));
#endif

    dprintf(("set dtr port 0x%p, dtr %d\n", (void *)port, dtr));
    return(set_DTRRTS(port, dtr, MCR_DTR, IOC4_SHADOW_DTR));
}


static int
ioc4_set_RTS(sioport_t *port, int rts)
{
#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_RTS));
#endif

    dprintf(("set rts port 0x%p, rts %d\n", (void *)port, rts));
    return(set_DTRRTS(port, rts, MCR_RTS, IOC4_SHADOW_RTS));
}


static int
ioc4_query_DCD(sioport_t *port)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_QUERY_DCD));
#endif

    dprintf(("get dcd port 0x%p\n", (void *)port));

    shadow = PCI_INW(&p->ip_serial->shadow);

    return(shadow & IOC4_SHADOW_DCD);
}


static int
ioc4_query_CTS(sioport_t *port)
{
    ioc4port_t *p = LPORT(port);
    ioc4reg_t shadow;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_QUERY_CTS));
#endif

    dprintf(("get cts port 0x%p\n", (void *)port));

    shadow = PCI_INW(&p->ip_serial->shadow);

    return(shadow & IOC4_SHADOW_CTS);
}


static int
ioc4_set_proto(sioport_t *port, enum sio_proto proto)
{
    ioc4port_t *p = LPORT(port);
    struct hooks *hooks = p->ip_hooks;

#ifdef NOT_YET
    ASSERT(L_LOCKED(port, L_SET_PROTOCOL));
#endif

    switch (proto) {
    case PROTO_RS232:
        /* Clear the appropriate GIO pin */
        PCI_OUTW((&p->ip_ioc4->gppr_0 + H_RS422), 0);
        break;

    case PROTO_RS422:
        /* Set the appropriate GIO pin */
        PCI_OUTW((&p->ip_ioc4->gppr_0 + H_RS422), 1);
        break;

    default:
        return(1);
    }

    return(0);
}
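/* The RS232/RS422 mode pin is reached through the port's hooks (H_RS422
 * indexes the general-purpose pin registers from gppr_0), so each port
 * drives its own transceiver-select pin.
 */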


// #define IS_PORT_0(p) ((p)->ip_hooks == &hooks_array[0])

static int
ioc4_get_mapid(sioport_t *port, void *arg)
{
    return(0);
}


static int
ioc4_set_sscr(sioport_t *port, int arg, int flag)
{
    ioc4port_t *p = LPORT(port);

    if (flag) { /* reset arg bits in p->ip_sscr */
        p->ip_sscr &= ~arg;
    } else { /* set bits in p->ip_sscr */
        p->ip_sscr |= arg;
    }
    PCI_OUTW(&p->ip_serial->sscr, p->ip_sscr);
    return(p->ip_sscr);
}
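/* Usage sketch (illustrative): ioc4_set_sscr(port, IOC4_SSCR_DMA_EN, 0)
 * would set the DMA enable bit, while a non-zero flag clears the given
 * bits instead; the updated software copy of the SSCR is returned.
 */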