1 /*
2  * drivers/serial/sh-sci.c
3  *
4  * SuperH on-chip serial module support.  (SCI with no FIFO / with FIFO)
5  *
6  *  Copyright (C) 2002 - 2011  Paul Mundt
7  *  Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
8  *
9  * based off of the old drivers/char/sh-sci.c by:
10  *
11  *   Copyright (C) 1999, 2000  Niibe Yutaka
12  *   Copyright (C) 2000  Sugioka Toshinobu
13  *   Modified to support multiple serial ports. Stuart Menefy (May 2000).
14  *   Modified to support SecureEdge. David McCullough (2002)
15  *   Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
16  *   Removed SH7300 support (Jul 2007).
17  *
18  * This file is subject to the terms and conditions of the GNU General Public
19  * License.  See the file "COPYING" in the main directory of this archive
20  * for more details.
21  */
22 #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
23 #define SUPPORT_SYSRQ
24 #endif
25 
26 #undef DEBUG
27 
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/timer.h>
31 #include <linux/interrupt.h>
32 #include <linux/tty.h>
33 #include <linux/tty_flip.h>
34 #include <linux/serial.h>
35 #include <linux/major.h>
36 #include <linux/string.h>
37 #include <linux/sysrq.h>
38 #include <linux/ioport.h>
39 #include <linux/mm.h>
40 #include <linux/init.h>
41 #include <linux/delay.h>
42 #include <linux/console.h>
43 #include <linux/platform_device.h>
44 #include <linux/serial_sci.h>
45 #include <linux/notifier.h>
46 #include <linux/cpufreq.h>
47 #include <linux/clk.h>
48 #include <linux/ctype.h>
49 #include <linux/err.h>
50 #include <linux/dmaengine.h>
51 #include <linux/scatterlist.h>
52 #include <linux/slab.h>
53 
54 #ifdef CONFIG_SUPERH
55 #include <asm/sh_bios.h>
56 #endif
57 
58 #ifdef CONFIG_H8300
59 #include <asm/gpio.h>
60 #endif
61 
62 #include "sh-sci.h"
63 
/* Per-port driver state, wrapping the generic uart_port. */
struct sci_port {
	struct uart_port	port;

	/* Platform configuration */
	struct plat_sci_port	*cfg;

	/* Port enable callback */
	void			(*enable)(struct uart_port *port);

	/* Port disable callback */
	void			(*disable)(struct uart_port *port);

	/* Break timer */
	struct timer_list	break_timer;
	int			break_flag;

	/* Interface clock */
	struct clk		*iclk;
	/* Function clock */
	struct clk		*fclk;

	/* DMA channels; NULL when the port runs in PIO mode */
	struct dma_chan			*chan_tx;
	struct dma_chan			*chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/* In-flight Tx descriptor and the two double-buffered Rx descriptors */
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx[2];
	dma_cookie_t			cookie_tx;
	dma_cookie_t			cookie_rx[2];
	/* Cookie of the Rx descriptor currently receiving data */
	dma_cookie_t			active_rx;
	struct scatterlist		sg_tx;
	unsigned int			sg_len_tx;
	struct scatterlist		sg_rx[2];
	/* Size of one Rx bounce buffer (two are allocated contiguously) */
	size_t				buf_len_rx;
	struct sh_dmae_slave		param_tx;
	struct sh_dmae_slave		param_rx;
	struct work_struct		work_tx;
	struct work_struct		work_rx;
	/* Falls back to PIO processing if Rx DMA stays idle too long */
	struct timer_list		rx_timer;
	unsigned int			rx_timeout;
#endif

	/* cpufreq transition notifier; keeps port->uartclk in sync */
	struct notifier_block		freq_transition;
};
108 
/* Function prototypes */
static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
static void sci_start_rx(struct uart_port *port);

/* Maximum number of ports, fixed by Kconfig */
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

static struct sci_port sci_ports[SCI_NPORTS];
static struct uart_driver sci_uart_driver;

/* Recover the driver-private sci_port wrapping a generic uart_port. */
static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}
124 
125 #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
126 
127 #ifdef CONFIG_CONSOLE_POLL
sci_poll_get_char(struct uart_port * port)128 static int sci_poll_get_char(struct uart_port *port)
129 {
130 	unsigned short status;
131 	int c;
132 
133 	do {
134 		status = sci_in(port, SCxSR);
135 		if (status & SCxSR_ERRORS(port)) {
136 			sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
137 			continue;
138 		}
139 		break;
140 	} while (1);
141 
142 	if (!(status & SCxSR_RDxF(port)))
143 		return NO_POLL_CHAR;
144 
145 	c = sci_in(port, SCxRDR);
146 
147 	/* Dummy read */
148 	sci_in(port, SCxSR);
149 	sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
150 
151 	return c;
152 }
153 #endif
154 
sci_poll_put_char(struct uart_port * port,unsigned char c)155 static void sci_poll_put_char(struct uart_port *port, unsigned char c)
156 {
157 	unsigned short status;
158 
159 	do {
160 		status = sci_in(port, SCxSR);
161 	} while (!(status & SCxSR_TDxE(port)));
162 
163 	sci_out(port, SCxTDR, c);
164 	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
165 }
166 #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
167 
#if defined(__H8300H__) || defined(__H8300S__)
/* H8/300: configure the SCI Rx/Tx pins via the GPIO data-direction regs. */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Derive the channel index from the register base address. */
	int ch = (port->mapbase - SMR0) >> 3;

	/* set DDR regs */
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].rx,
		       H8300_GPIO_INPUT);
	H8300_GPIO_DDR(h8300_sci_pins[ch].port,
		       h8300_sci_pins[ch].tx,
		       H8300_GPIO_OUTPUT);

	/* tx mark output*/
	H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* The SCIF instance is identified by its mapped base address. */
	if (port->mapbase == 0xA4400000) {
		__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
		__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
	} else if (port->mapbase == 0xA4410000)
		__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (cflag & CRTSCTS) {
		/* enable RTS/CTS */
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 9-2; enable all scif pins but sck */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xfc03), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 9-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xfc03), PORT_PVCR);
		}
	} else {
		if (port->mapbase == 0xa4430000) { /* SCIF0 */
			/* Clear PTCR bit 5-2; enable only tx and rx  */
			data = __raw_readw(PORT_PTCR);
			__raw_writew((data & 0xffc3), PORT_PTCR);
		} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
			/* Clear PVCR bit 5-2 */
			data = __raw_readw(PORT_PVCR);
			__raw_writew((data & 0xffc3), PORT_PVCR);
		}
	}
}
#elif defined(CONFIG_CPU_SH3)
/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	/* We need to set SCPCR to enable RTS/CTS */
	data = __raw_readw(SCPCR);
	/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
	__raw_writew(data & 0x0fcf, SCPCR);

	if (!(cflag & CRTSCTS)) {
		/* We need to set SCPCR to enable RTS/CTS */
		data = __raw_readw(SCPCR);
		/* Clear out SCP7MD1,0, SCP4MD1,0,
		   Set SCP6MD1,0 = {01} (output)  */
		__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);

		data = __raw_readb(SCPDR);
		/* Set /RTS2 (bit6) = 0 */
		__raw_writeb(data & 0xbf, SCPDR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	unsigned short data;

	if (port->mapbase == 0xffe00000) {
		data = __raw_readw(PSCR);
		data &= ~0x03cf;
		/* Without hardware flow control, drive RTS manually. */
		if (!(cflag & CRTSCTS))
			data |= 0x0340;

		__raw_writew(data, PSCR);
	}
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
      defined(CONFIG_CPU_SUBTYPE_SH7763) || \
      defined(CONFIG_CPU_SUBTYPE_SH7780) || \
      defined(CONFIG_CPU_SUBTYPE_SH7785) || \
      defined(CONFIG_CPU_SUBTYPE_SH7786) || \
      defined(CONFIG_CPU_SUBTYPE_SHX3)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
}
#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	if (!(cflag & CRTSCTS))
		__raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
}
#else
static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	/* Nothing to do */
}
#endif
281 
282 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
283     defined(CONFIG_CPU_SUBTYPE_SH7780) || \
284     defined(CONFIG_CPU_SUBTYPE_SH7785) || \
285     defined(CONFIG_CPU_SUBTYPE_SH7786)
/* Tx FIFO occupancy: SCTFDR reports the number of queued bytes. */
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCTFDR) & 0xff;
}

/* Free space remaining in the Tx FIFO. */
static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

/* Rx FIFO occupancy: SCRFDR reports the number of pending bytes. */
static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCRFDR) & 0xff;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
/*
 * SH7763: SCIF0/1 have dedicated FIFO-count registers, while SCIF2
 * packs both counts into SCFDR (Tx in the high byte, Rx in low bits).
 */
static int scif_txfill(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return sci_in(port, SCTFDR) & 0xff;
	else
		/* SCIF2 */
		return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	if (port->mapbase == 0xffe00000 ||
	    port->mapbase == 0xffe08000)
		/* SCIF0/1*/
		return SCIF_TXROOM_MAX - scif_txfill(port);
	else
		/* SCIF2 */
		return SCIF2_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if ((port->mapbase == 0xffe00000) ||
	    (port->mapbase == 0xffe08000)) {
		/* SCIF0/1*/
		return sci_in(port, SCRFDR) & 0xff;
	} else {
		/* SCIF2 */
		return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
	}
}
#elif defined(CONFIG_ARCH_SH7372)
/* SH7372: SCIFA packs counts into SCFDR, SCIFB has separate registers. */
static int scif_txfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) >> 8;
	else
		return sci_in(port, SCTFDR);
}

static int scif_txroom(struct uart_port *port)
{
	return port->fifosize - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	if (port->type == PORT_SCIFA)
		return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
	else
		return sci_in(port, SCRFDR);
}
#else
/* Generic SCIF: both FIFO counts live in SCFDR. */
static int scif_txfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) >> 8;
}

static int scif_txroom(struct uart_port *port)
{
	return SCIF_TXROOM_MAX - scif_txfill(port);
}

static int scif_rxfill(struct uart_port *port)
{
	return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
}
#endif
371 
sci_txfill(struct uart_port * port)372 static int sci_txfill(struct uart_port *port)
373 {
374 	return !(sci_in(port, SCxSR) & SCI_TDRE);
375 }
376 
/* Room for exactly one byte once the SCI data register has drained. */
static int sci_txroom(struct uart_port *port)
{
	return sci_txfill(port) == 0;
}
381 
sci_rxfill(struct uart_port * port)382 static int sci_rxfill(struct uart_port *port)
383 {
384 	return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
385 }
386 
387 /* ********************************************************************** *
388  *                   the interrupt related routines                       *
389  * ********************************************************************** */
390 
/*
 * Move characters from the circular write buffer (and any pending
 * XON/XOFF in port->x_char) into the transmit data register/FIFO,
 * then arm or disarm the TIE interrupt depending on remaining data.
 */
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		/* No room yet: just keep TIE armed while data is pending. */
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
		return;
	}

	/* SCI has a one-byte data register; SCIF has a multi-byte FIFO. */
	if (port->type == PORT_SCI)
		count = sci_txroom(port);
	else
		count = scif_txroom(port);

	do {
		unsigned char c;

		/* Flow-control character takes priority over buffered data. */
		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	/* Acknowledge the Tx-empty condition we just serviced. */
	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		/* More data queued: keep the Tx interrupt enabled. */
		ctrl |= SCSCR_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
451 
452 /* On SH3, SCIF may read end-of-break as a space->mark char */
453 #define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })
454 
/*
 * Drain the receive data register/FIFO into the tty flip buffer,
 * handling sysrq characters, per-character frame/parity status, and
 * (on SH3) the end-of-break debounce. Pushes the flip buffer when any
 * characters were copied.
 */
static void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = to_sci_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		if (port->type == PORT_SCI)
			count = sci_rxfill(port);
		else
			count = scif_rxfill(port);

		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, count);

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			/* Single data register: at most one char per pass. */
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) ||
			    sci_port->break_flag)
				count = 0;
			else
				tty_insert_flip_char(tty, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					dev_dbg(port->dev, "debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					/* sysrq consumed it; don't count it. */
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
					flag = TTY_PARITY;
					dev_notice(port->dev, "parity error\n");
				} else
					flag = TTY_NORMAL;

				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		/* Nothing copied: still acknowledge the Rx-full flag. */
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
544 
545 #define SCI_BREAK_JIFFIES (HZ/20)
546 
547 /*
548  * The sci generates interrupts during the break,
549  * 1 per millisecond or so during the break period, for 9600 baud.
550  * So dont bother disabling interrupts.
551  * But dont want more than 1 break event.
552  * Use a kernel timer to periodically poll the rx line until
553  * the break is finished.
554  */
/* Re-arm the break poll timer; see the comment above for the rationale. */
static inline void sci_schedule_break_timer(struct sci_port *port)
{
	mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
559 
560 /* Ensure that two consecutive samples find the break over. */
sci_break_timer(unsigned long data)561 static void sci_break_timer(unsigned long data)
562 {
563 	struct sci_port *port = (struct sci_port *)data;
564 
565 	if (sci_rxd_in(&port->port) == 0) {
566 		port->break_flag = 1;
567 		sci_schedule_break_timer(port);
568 	} else if (port->break_flag == 1) {
569 		/* break is over. */
570 		port->break_flag = 2;
571 		sci_schedule_break_timer(port);
572 	} else
573 		port->break_flag = 0;
574 }
575 
sci_handle_errors(struct uart_port * port)576 static int sci_handle_errors(struct uart_port *port)
577 {
578 	int copied = 0;
579 	unsigned short status = sci_in(port, SCxSR);
580 	struct tty_struct *tty = port->state->port.tty;
581 
582 	if (status & SCxSR_ORER(port)) {
583 		/* overrun error */
584 		if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
585 			copied++;
586 
587 		dev_notice(port->dev, "overrun error");
588 	}
589 
590 	if (status & SCxSR_FER(port)) {
591 		if (sci_rxd_in(port) == 0) {
592 			/* Notify of BREAK */
593 			struct sci_port *sci_port = to_sci_port(port);
594 
595 			if (!sci_port->break_flag) {
596 				sci_port->break_flag = 1;
597 				sci_schedule_break_timer(sci_port);
598 
599 				/* Do sysrq handling. */
600 				if (uart_handle_break(port))
601 					return 0;
602 
603 				dev_dbg(port->dev, "BREAK detected\n");
604 
605 				if (tty_insert_flip_char(tty, 0, TTY_BREAK))
606 					copied++;
607 			}
608 
609 		} else {
610 			/* frame error */
611 			if (tty_insert_flip_char(tty, 0, TTY_FRAME))
612 				copied++;
613 
614 			dev_notice(port->dev, "frame error\n");
615 		}
616 	}
617 
618 	if (status & SCxSR_PER(port)) {
619 		/* parity error */
620 		if (tty_insert_flip_char(tty, 0, TTY_PARITY))
621 			copied++;
622 
623 		dev_notice(port->dev, "parity error");
624 	}
625 
626 	if (copied)
627 		tty_flip_buffer_push(tty);
628 
629 	return copied;
630 }
631 
sci_handle_fifo_overrun(struct uart_port * port)632 static int sci_handle_fifo_overrun(struct uart_port *port)
633 {
634 	struct tty_struct *tty = port->state->port.tty;
635 	int copied = 0;
636 
637 	if (port->type != PORT_SCIF)
638 		return 0;
639 
640 	if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
641 		sci_out(port, SCLSR, 0);
642 
643 		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
644 		tty_flip_buffer_push(tty);
645 
646 		dev_notice(port->dev, "overrun error\n");
647 		copied++;
648 	}
649 
650 	return copied;
651 }
652 
/*
 * Handle a break condition flagged in SCxSR: queue a TTY_BREAK unless
 * sysrq consumes it, then also check for FIFO overruns. Returns the
 * number of characters queued.
 */
static int sci_handle_breaks(struct uart_port *port)
{
	int copied = 0;
	unsigned short status = sci_in(port, SCxSR);
	struct tty_struct *tty = port->state->port.tty;
	struct sci_port *s = to_sci_port(port);

	if (uart_handle_break(port))
		return 0;

	if (!s->break_flag && status & SCxSR_BRK(port)) {
#if defined(CONFIG_CPU_SH3)
		/* Debounce break */
		s->break_flag = 1;
#endif
		/* Notify of BREAK */
		if (tty_insert_flip_char(tty, 0, TTY_BREAK))
			copied++;

		dev_dbg(port->dev, "BREAK detected\n");
	}

	if (copied)
		tty_flip_buffer_push(tty);

	copied += sci_handle_fifo_overrun(port);

	return copied;
}
682 
/*
 * Rx interrupt. When an Rx DMA channel is active, Rx interrupts are
 * masked and a fallback timer is armed instead of reading data here;
 * otherwise characters are drained via PIO.
 */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	if (s->chan_rx) {
		u16 scr = sci_in(port, SCSCR);
		u16 ssr = sci_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			/* NOTE(review): 0x4000 appears to be the SCIFA/SCIFB
			 * Rx-DMA-request enable bit in SCSCR - confirm
			 * against the datasheet before renaming. */
			scr |= 0x4000;
		} else {
			scr &= ~SCSCR_RIE;
		}
		sci_out(port, SCSCR, scr);
		/* Clear current interrupt */
		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		/* Fall back to PIO if DMA hasn't completed by the timeout. */
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
719 
sci_tx_interrupt(int irq,void * ptr)720 static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
721 {
722 	struct uart_port *port = ptr;
723 	unsigned long flags;
724 
725 	spin_lock_irqsave(&port->lock, flags);
726 	sci_transmit_chars(port);
727 	spin_unlock_irqrestore(&port->lock, flags);
728 
729 	return IRQ_HANDLED;
730 }
731 
/*
 * Error interrupt: dispatch to the appropriate error handler, clear
 * the error flags, and kick Rx/Tx so no data is left stuck.
 */
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			sci_in(port, SCxSR);
			sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		/* FIFO ports: check overrun first, then drain pending data. */
		sci_handle_fifo_overrun(port);
		sci_rx_interrupt(irq, ptr);
	}

	sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
755 
sci_br_interrupt(int irq,void * ptr)756 static irqreturn_t sci_br_interrupt(int irq, void *ptr)
757 {
758 	struct uart_port *port = ptr;
759 
760 	/* Handle BREAKs */
761 	sci_handle_breaks(port);
762 	sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port));
763 
764 	return IRQ_HANDLED;
765 }
766 
port_rx_irq_mask(struct uart_port * port)767 static inline unsigned long port_rx_irq_mask(struct uart_port *port)
768 {
769 	/*
770 	 * Not all ports (such as SCIFA) will support REIE. Rather than
771 	 * special-casing the port type, we check the port initialization
772 	 * IRQ enable mask to see whether the IRQ is desired at all. If
773 	 * it's unset, it's logically inferred that there's no point in
774 	 * testing for it.
775 	 */
776 	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
777 }
778 
/*
 * Combined handler for ports whose Tx/Rx/error/break events share one
 * IRQ line: inspect the status and control registers and dispatch to
 * each sub-handler whose condition is both raised and enabled.
 */
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
{
	unsigned short ssr_status, scr_status, err_enabled;
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);
	irqreturn_t ret = IRQ_NONE;

	ssr_status = sci_in(port, SCxSR);
	scr_status = sci_in(port, SCSCR);
	err_enabled = scr_status & port_rx_irq_mask(port);

	/* Tx Interrupt */
	if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
	    !s->chan_tx)
		ret = sci_tx_interrupt(irq, ptr);

	/*
	 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
	 * DR flags
	 */
	if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
	    (scr_status & SCSCR_RIE))
		ret = sci_rx_interrupt(irq, ptr);

	/* Error Interrupt */
	if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
		ret = sci_er_interrupt(irq, ptr);

	/* Break Interrupt */
	if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
		ret = sci_br_interrupt(irq, ptr);

	return ret;
}
813 
814 /*
815  * Here we define a transition notifier so that we can update all of our
816  * ports' baud rate when the peripheral clock changes.
817  */
sci_notifier(struct notifier_block * self,unsigned long phase,void * p)818 static int sci_notifier(struct notifier_block *self,
819 			unsigned long phase, void *p)
820 {
821 	struct sci_port *sci_port;
822 	unsigned long flags;
823 
824 	sci_port = container_of(self, struct sci_port, freq_transition);
825 
826 	if ((phase == CPUFREQ_POSTCHANGE) ||
827 	    (phase == CPUFREQ_RESUMECHANGE)) {
828 		struct uart_port *port = &sci_port->port;
829 
830 		spin_lock_irqsave(&port->lock, flags);
831 		port->uartclk = clk_get_rate(sci_port->iclk);
832 		spin_unlock_irqrestore(&port->lock, flags);
833 	}
834 
835 	return NOTIFY_OK;
836 }
837 
sci_clk_enable(struct uart_port * port)838 static void sci_clk_enable(struct uart_port *port)
839 {
840 	struct sci_port *sci_port = to_sci_port(port);
841 
842 	clk_enable(sci_port->iclk);
843 	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
844 	clk_enable(sci_port->fclk);
845 }
846 
sci_clk_disable(struct uart_port * port)847 static void sci_clk_disable(struct uart_port *port)
848 {
849 	struct sci_port *sci_port = to_sci_port(port);
850 
851 	clk_disable(sci_port->fclk);
852 	clk_disable(sci_port->iclk);
853 }
854 
sci_request_irq(struct sci_port * port)855 static int sci_request_irq(struct sci_port *port)
856 {
857 	int i;
858 	irqreturn_t (*handlers[4])(int irq, void *ptr) = {
859 		sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
860 		sci_br_interrupt,
861 	};
862 	const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
863 			       "SCI Transmit Data Empty", "SCI Break" };
864 
865 	if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
866 		if (unlikely(!port->cfg->irqs[0]))
867 			return -ENODEV;
868 
869 		if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
870 				IRQF_DISABLED, "sci", port)) {
871 			dev_err(port->port.dev, "Can't allocate IRQ\n");
872 			return -ENODEV;
873 		}
874 	} else {
875 		for (i = 0; i < ARRAY_SIZE(handlers); i++) {
876 			if (unlikely(!port->cfg->irqs[i]))
877 				continue;
878 
879 			if (request_irq(port->cfg->irqs[i], handlers[i],
880 					IRQF_DISABLED, desc[i], port)) {
881 				dev_err(port->port.dev, "Can't allocate IRQ\n");
882 				return -ENODEV;
883 			}
884 		}
885 	}
886 
887 	return 0;
888 }
889 
sci_free_irq(struct sci_port * port)890 static void sci_free_irq(struct sci_port *port)
891 {
892 	int i;
893 
894 	if (port->cfg->irqs[0] == port->cfg->irqs[1])
895 		free_irq(port->cfg->irqs[0], port);
896 	else {
897 		for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
898 			if (!port->cfg->irqs[i])
899 				continue;
900 
901 			free_irq(port->cfg->irqs[i], port);
902 		}
903 	}
904 }
905 
/* uart_ops.tx_empty: report TIOCSER_TEMT when nothing is left to send. */
static unsigned int sci_tx_empty(struct uart_port *port)
{
	unsigned short status = sci_in(port, SCxSR);
	/* NOTE(review): scif_txfill() is used unconditionally here, even
	 * for PORT_SCI which has no FIFO registers - confirm this is safe
	 * on SCI-only parts. */
	unsigned short in_tx_fifo = scif_txfill(port);

	/* Empty only when the shifter is done AND the FIFO has drained. */
	return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
913 
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* This routine is used for setting signals of: DTR, DCD, CTS/RTS */
	/* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
	/* If you have signals for DTR and DCD, please implement here. */
}
920 
static unsigned int sci_get_mctrl(struct uart_port *port)
{
	/* This routine is used for getting signals of: DTR, DCD, DSR, RI,
	   and CTS/RTS */

	/* No modem-status inputs are wired up: report DTR/RTS/DSR asserted. */
	return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
}
928 
929 #ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
 * Tx DMA completion callback: account for the bytes just sent,
 * release the descriptor, and either schedule the next Tx work item
 * or (on SCIFA/SCIFB) disable the Tx interrupt when the buffer is
 * empty.
 */
static void sci_dma_tx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	spin_lock_irqsave(&port->lock, flags);

	/* Advance the tail past everything the DMA transfer consumed. */
	xmit->tail += sg_dma_len(&s->sg_tx);
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += sg_dma_len(&s->sg_tx);

	async_tx_ack(s->desc_tx);
	s->cookie_tx = -EINVAL;
	s->desc_tx = NULL;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* More buffered data: queue another DMA submission. */
		schedule_work(&s->work_tx);
	} else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}
962 
963 /* Locking: called with port lock held */
sci_dma_rx_push(struct sci_port * s,struct tty_struct * tty,size_t count)964 static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
965 			   size_t count)
966 {
967 	struct uart_port *port = &s->port;
968 	int i, active, room;
969 
970 	room = tty_buffer_request_room(tty, count);
971 
972 	if (s->active_rx == s->cookie_rx[0]) {
973 		active = 0;
974 	} else if (s->active_rx == s->cookie_rx[1]) {
975 		active = 1;
976 	} else {
977 		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
978 		return 0;
979 	}
980 
981 	if (room < count)
982 		dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
983 			 count - room);
984 	if (!room)
985 		return room;
986 
987 	for (i = 0; i < room; i++)
988 		tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
989 				     TTY_NORMAL);
990 
991 	port->icount.rx += room;
992 
993 	return room;
994 }
995 
/*
 * Rx DMA completion callback: push the finished bounce buffer to the
 * tty layer, re-arm the fallback timer, and schedule the work item
 * that resubmits the descriptor.
 */
static void sci_dma_rx_complete(void *arg)
{
	struct sci_port *s = arg;
	struct uart_port *port = &s->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned long flags;
	int count;

	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);

	spin_lock_irqsave(&port->lock, flags);

	count = sci_dma_rx_push(s, tty, s->buf_len_rx);

	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

	spin_unlock_irqrestore(&port->lock, flags);

	if (count)
		tty_flip_buffer_push(tty);

	/* Re-queue the buffer for the next transfer. */
	schedule_work(&s->work_rx);
}
1019 
/*
 * Tear down the Rx DMA channel and its bounce buffers; optionally
 * restart reception in PIO mode.
 */
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_rx;
	struct uart_port *port = &s->port;

	/* Detach the channel first so nothing re-submits against it. */
	s->chan_rx = NULL;
	s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
	dma_release_channel(chan);
	if (sg_dma_address(&s->sg_rx[0]))
		/* Both bounce buffers came from one coherent allocation. */
		dma_free_coherent(port->dev, s->buf_len_rx * 2,
				  sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
	if (enable_pio)
		sci_start_rx(port);
}
1034 
/*
 * Tear down the Tx DMA channel; optionally restart transmission in
 * PIO mode.
 */
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
{
	struct dma_chan *chan = s->chan_tx;

	/* Detach the channel first so nothing re-submits against it. */
	s->chan_tx = NULL;
	s->cookie_tx = -EINVAL;
	dma_release_channel(chan);
	if (enable_pio)
		sci_start_tx(&s->port);
}
1046 
/*
 * Prepare and submit both double-buffered Rx descriptors. If either
 * submission fails, everything is unwound and the port falls back to
 * PIO reception.
 */
static void sci_submit_rx(struct sci_port *s)
{
	struct dma_chan *chan = s->chan_rx;
	int i;

	for (i = 0; i < 2; i++) {
		struct scatterlist *sg = &s->sg_rx[i];
		struct dma_async_tx_descriptor *desc;

		desc = chan->device->device_prep_slave_sg(chan,
			sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);

		if (desc) {
			s->desc_rx[i] = desc;
			desc->callback = sci_dma_rx_complete;
			desc->callback_param = s;
			s->cookie_rx[i] = desc->tx_submit(desc);
		}

		if (!desc || s->cookie_rx[i] < 0) {
			/* Roll back whatever was already submitted. */
			if (i) {
				async_tx_ack(s->desc_rx[0]);
				s->cookie_rx[0] = -EINVAL;
			}
			if (desc) {
				async_tx_ack(desc);
				s->cookie_rx[i] = -EINVAL;
			}
			dev_warn(s->port.dev,
				 "failed to re-start DMA, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}
		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
			s->cookie_rx[i], i);
	}

	/* Buffer 0 receives first. */
	s->active_rx = s->cookie_rx[0];

	dma_async_issue_pending(chan);
}
1088 
/*
 * Rx workqueue handler.  Normal path: resubmit the just-completed
 * descriptor and make the other buffer active.  Recovery path (reached
 * from rx_timer_fn() while the active transfer is still incomplete):
 * terminate the transfer, push the partial data to the tty layer, and
 * restart both descriptors from scratch.
 */
static void work_fn_rx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_rx);
	struct uart_port *port = &s->port;
	struct dma_async_tx_descriptor *desc;
	int new;

	/* Map the active cookie back to its buffer index (0 or 1). */
	if (s->active_rx == s->cookie_rx[0]) {
		new = 0;
	} else if (s->active_rx == s->cookie_rx[1]) {
		new = 1;
	} else {
		dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
		return;
	}
	desc = s->desc_rx[new];

	if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
	    DMA_SUCCESS) {
		/* Handle incomplete DMA receive */
		struct tty_struct *tty = port->state->port.tty;
		struct dma_chan *chan = s->chan_rx;
		/* shdma-specific descriptor: ->partial is the byte count
		 * transferred so far. */
		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
						       async_tx);
		unsigned long flags;
		int count;

		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
			sh_desc->partial, sh_desc->cookie);

		spin_lock_irqsave(&port->lock, flags);
		count = sci_dma_rx_push(s, tty, sh_desc->partial);
		spin_unlock_irqrestore(&port->lock, flags);

		if (count)
			tty_flip_buffer_push(tty);

		/* Re-arm both descriptors from a clean state. */
		sci_submit_rx(s);

		return;
	}

	/* Completed normally: recycle this descriptor... */
	s->cookie_rx[new] = desc->tx_submit(desc);
	if (s->cookie_rx[new] < 0) {
		dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
		sci_rx_dma_release(s, true);
		return;
	}

	/* ...and make the other buffer the active one. */
	s->active_rx = s->cookie_rx[!new];

	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
		s->cookie_rx[new], new, s->active_rx);
}
1144 
/*
 * Tx workqueue handler: set up and submit one DMA transfer covering the
 * contiguous portion of the circular xmit buffer.  On any failure the
 * port falls back to interrupt-driven transmission.
 */
static void work_fn_tx(struct work_struct *work)
{
	struct sci_port *s = container_of(work, struct sci_port, work_tx);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = s->chan_tx;
	struct uart_port *port = &s->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct scatterlist *sg = &s->sg_tx;

	/*
	 * DMA is idle now.
	 * Port xmit buffer is already mapped, and it is one page... Just adjust
	 * offsets and lengths. Since it is a circular buffer, we have to
	 * transmit till the end, and then the rest. Take the port lock to get a
	 * consistent xmit buffer state.
	 */
	spin_lock_irq(&port->lock);
	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
		sg->offset;
	/* Limit the transfer to the contiguous run up to the buffer end. */
	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
	spin_unlock_irq(&port->lock);

	/* Callers only schedule this work with a non-empty xmit buffer. */
	BUG_ON(!sg_dma_len(sg));

	desc = chan->device->device_prep_slave_sg(chan,
			sg, s->sg_len_tx, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	/* Make the CPU-written xmit data visible to the device. */
	dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);

	/* Publish the descriptor under the lock before submission. */
	spin_lock_irq(&port->lock);
	s->desc_tx = desc;
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	spin_unlock_irq(&port->lock);
	s->cookie_tx = desc->tx_submit(desc);
	if (s->cookie_tx < 0) {
		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
		/* switch to PIO */
		sci_tx_dma_release(s, true);
		return;
	}

	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
		xmit->buf, xmit->tail, xmit->head, s->cookie_tx);

	dma_async_issue_pending(chan);
}
1200 #endif
1201 
/*
 * uart_ops.start_tx: kick off transmission.  Uses DMA when a Tx channel
 * has been set up, otherwise enables the Tx interrupt so the ISR drains
 * the xmit buffer.
 */
static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/*
		 * Mirror the DMA state into the SCIFA/SCIFB-only SCSCR bit
		 * 0x8000 (presumably the Tx DMA request enable — confirm
		 * against the datasheet).
		 */
		u16 new, scr = sci_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | 0x8000;
		else
			new = scr & ~0x8000;
		if (new != scr)
			sci_out(port, SCSCR, new);
	}

	/* Only schedule a DMA transfer when none is already in flight. */
	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    s->cookie_tx < 0)
		schedule_work(&s->work_tx);
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = sci_in(port, SCSCR);
		sci_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
1229 
sci_stop_tx(struct uart_port * port)1230 static void sci_stop_tx(struct uart_port *port)
1231 {
1232 	unsigned short ctrl;
1233 
1234 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
1235 	ctrl = sci_in(port, SCSCR);
1236 
1237 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1238 		ctrl &= ~0x8000;
1239 
1240 	ctrl &= ~SCSCR_TIE;
1241 
1242 	sci_out(port, SCSCR, ctrl);
1243 }
1244 
sci_start_rx(struct uart_port * port)1245 static void sci_start_rx(struct uart_port *port)
1246 {
1247 	unsigned short ctrl;
1248 
1249 	ctrl = sci_in(port, SCSCR) | port_rx_irq_mask(port);
1250 
1251 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1252 		ctrl &= ~0x4000;
1253 
1254 	sci_out(port, SCSCR, ctrl);
1255 }
1256 
sci_stop_rx(struct uart_port * port)1257 static void sci_stop_rx(struct uart_port *port)
1258 {
1259 	unsigned short ctrl;
1260 
1261 	ctrl = sci_in(port, SCSCR);
1262 
1263 	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
1264 		ctrl &= ~0x4000;
1265 
1266 	ctrl &= ~port_rx_irq_mask(port);
1267 
1268 	sci_out(port, SCSCR, ctrl);
1269 }
1270 
/* uart_ops.enable_ms: modem-status interrupts are not implemented. */
static void sci_enable_ms(struct uart_port *port)
{
	/* Nothing here yet .. */
}
1275 
/* uart_ops.break_ctl: break signalling is not implemented. */
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	/* Nothing here yet .. */
}
1280 
1281 #ifdef CONFIG_SERIAL_SH_SCI_DMA
filter(struct dma_chan * chan,void * slave)1282 static bool filter(struct dma_chan *chan, void *slave)
1283 {
1284 	struct sh_dmae_slave *param = slave;
1285 
1286 	dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1287 		param->slave_id);
1288 
1289 	if (param->dma_dev == chan->device->dev) {
1290 		chan->private = param;
1291 		return true;
1292 	} else {
1293 		return false;
1294 	}
1295 }
1296 
/*
 * Rx DMA timeout: no buffer completed within rx_timeout jiffies.
 * Re-enable the Rx interrupt so pending FIFO data is serviced, and let
 * work_fn_rx() collect whatever the DMA transferred so far.
 */
static void rx_timer_fn(unsigned long arg)
{
	struct sci_port *s = (struct sci_port *)arg;
	struct uart_port *port = &s->port;
	u16 scr = sci_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Clear the SCIFA/SCIFB-only 0x4000 SCSCR bit and unmask
		 * irqs[1] (presumably the RXI line — confirm). */
		scr &= ~0x4000;
		enable_irq(s->cfg->irqs[1]);
	}
	sci_out(port, SCSCR, scr | SCSCR_RIE);
	dev_dbg(port->dev, "DMA Rx timed out\n");
	schedule_work(&s->work_rx);
}
1311 
/*
 * Set up DMA for the port, if the platform data names a DMA device.
 * Tx uses one scatterlist entry over the (page-aligned) circular xmit
 * buffer; Rx uses two coherent buffers for double buffering.  Any
 * failure falls back to PIO for the affected direction.
 */
static void sci_request_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	struct sh_dmae_slave *param;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int nent;

	dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
		port->line, s->cfg->dma_dev);

	/* No DMA device configured: stay with PIO. */
	if (!s->cfg->dma_dev)
		return;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	param = &s->param_tx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
	param->slave_id = s->cfg->dma_slave_tx;
	param->dma_dev = s->cfg->dma_dev;

	s->cookie_tx = -EINVAL;
	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
	if (chan) {
		s->chan_tx = chan;
		sg_init_table(&s->sg_tx, 1);
		/* UART circular tx buffer is an aligned page. */
		BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
		sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
			    UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
		nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
		if (!nent)
			sci_tx_dma_release(s, false);
		else
			dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
				sg_dma_len(&s->sg_tx),
				port->state->xmit.buf, sg_dma_address(&s->sg_tx));

		s->sg_len_tx = nent;

		INIT_WORK(&s->work_tx, work_fn_tx);
	}

	param = &s->param_rx;

	/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
	param->slave_id = s->cfg->dma_slave_rx;
	param->dma_dev = s->cfg->dma_dev;

	chan = dma_request_channel(mask, filter, param);
	dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
	if (chan) {
		dma_addr_t dma[2];
		void *buf[2];
		int i;

		s->chan_rx = chan;

		/* Each Rx buffer is twice the FIFO size, at least 32 bytes. */
		s->buf_len_rx = 2 * max(16, (int)port->fifosize);
		/* One allocation, split into two halves below. */
		buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
					    &dma[0], GFP_KERNEL);

		if (!buf[0]) {
			dev_warn(port->dev,
				 "failed to allocate dma buffer, using PIO\n");
			sci_rx_dma_release(s, true);
			return;
		}

		buf[1] = buf[0] + s->buf_len_rx;
		dma[1] = dma[0] + s->buf_len_rx;

		for (i = 0; i < 2; i++) {
			struct scatterlist *sg = &s->sg_rx[i];

			sg_init_table(sg, 1);
			sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
				    (int)buf[i] & ~PAGE_MASK);
			sg_dma_address(sg) = dma[i];
		}

		INIT_WORK(&s->work_rx, work_fn_rx);
		setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);

		sci_submit_rx(s);
	}
}
1402 
sci_free_dma(struct uart_port * port)1403 static void sci_free_dma(struct uart_port *port)
1404 {
1405 	struct sci_port *s = to_sci_port(port);
1406 
1407 	if (!s->cfg->dma_dev)
1408 		return;
1409 
1410 	if (s->chan_tx)
1411 		sci_tx_dma_release(s, false);
1412 	if (s->chan_rx)
1413 		sci_rx_dma_release(s, false);
1414 }
1415 #else
/* DMA support compiled out: no-op stub. */
static inline void sci_request_dma(struct uart_port *port)
{
}
1419 
/* DMA support compiled out: no-op stub. */
static inline void sci_free_dma(struct uart_port *port)
{
}
1423 #endif
1424 
/*
 * uart_ops.startup: called on first open.  Enable the port clock,
 * request the IRQs, set up DMA (if configured), then start both
 * directions.  Returns 0 or a negative errno from IRQ setup.
 */
static int sci_startup(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	int ret;

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	/* Clock must be on before any register access below. */
	if (s->enable)
		s->enable(port);

	ret = sci_request_irq(s);
	if (unlikely(ret < 0))
		return ret;

	sci_request_dma(port);

	sci_start_tx(port);
	sci_start_rx(port);

	return 0;
}
1446 
/*
 * uart_ops.shutdown: undo sci_startup() in reverse order — stop both
 * directions, release DMA and IRQs, then gate the clock.
 */
static void sci_shutdown(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

	sci_stop_rx(port);
	sci_stop_tx(port);

	sci_free_dma(port);
	sci_free_irq(s);

	if (s->disable)
		s->disable(port);
}
1462 
sci_scbrr_calc(unsigned int algo_id,unsigned int bps,unsigned long freq)1463 static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
1464 				   unsigned long freq)
1465 {
1466 	switch (algo_id) {
1467 	case SCBRR_ALGO_1:
1468 		return ((freq + 16 * bps) / (16 * bps) - 1);
1469 	case SCBRR_ALGO_2:
1470 		return ((freq + 16 * bps) / (32 * bps) - 1);
1471 	case SCBRR_ALGO_3:
1472 		return (((freq * 2) + 16 * bps) / (16 * bps) - 1);
1473 	case SCBRR_ALGO_4:
1474 		return (((freq * 2) + 16 * bps) / (32 * bps) - 1);
1475 	case SCBRR_ALGO_5:
1476 		return (((freq * 1000 / 32) / bps) - 1);
1477 	}
1478 
1479 	/* Warn, but use a safe default */
1480 	WARN_ON(1);
1481 
1482 	return ((freq + 16 * bps) / (32 * bps) - 1);
1483 }
1484 
/*
 * uart_ops.set_termios: program baud rate, frame format and flow
 * control.  The port is quiesced (TE/RE off) while SMR/BRR are
 * rewritten, then restarted with the platform's SCSCR value.
 */
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
			    struct ktermios *old)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int status, baud, smr_val, max_baud;
	int t = -1;
	u16 scfcr = 0;

	/*
	 * earlyprintk comes here early on with port->uartclk set to zero.
	 * the clock framework is not up and running at this point so here
	 * we assume that 115200 is the maximum baud rate. please note that
	 * the baud rate is not programmed during earlyprintk - it is assumed
	 * that the previous boot loader has enabled required clocks and
	 * setup the baud rate generator hardware for us already.
	 */
	max_baud = port->uartclk ? port->uartclk / 16 : 115200;

	baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
	if (likely(baud && port->uartclk))
		t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);

	if (s->enable)
		s->enable(port);

	/* Wait for the transmitter to drain before reprogramming. */
	do {
		status = sci_in(port, SCxSR);
	} while (!(status & SCxSR_TEND(port)));

	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */

	/* Reset both FIFOs on FIFO-equipped variants. */
	if (port->type != PORT_SCI)
		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);

	/* Preserve the clock-select bits (SMR[1:0]) only. */
	smr_val = sci_in(port, SCSMR) & 3;

	/* Build SMR: 0x40 = 7-bit, 0x20 = parity enable, 0x30 adds odd
	 * parity, 0x08 = 2 stop bits (per the SCSMR layout — confirm
	 * against the datasheet). */
	if ((termios->c_cflag & CSIZE) == CS7)
		smr_val |= 0x40;
	if (termios->c_cflag & PARENB)
		smr_val |= 0x20;
	if (termios->c_cflag & PARODD)
		smr_val |= 0x30;
	if (termios->c_cflag & CSTOPB)
		smr_val |= 0x08;

	uart_update_timeout(port, termios->c_cflag, baud);

	sci_out(port, SCSMR, smr_val);

	dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
		s->cfg->scscr);

	if (t > 0) {
		/* BRR is 8-bit: for large divisors switch the clock-select
		 * field to a /4 source and scale t down.  Only one step is
		 * handled here. */
		if (t >= 256) {
			sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
			t >>= 2;
		} else
			sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3);

		sci_out(port, SCBRR, t);
		udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
	}

	sci_init_pins(port, termios->c_cflag);
	/* Enable modem-control (CTS/RTS) in SCFCR when requested. */
	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));

	sci_out(port, SCSCR, s->cfg->scscr);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	/*
	 * Calculate delay for 1.5 DMA buffers: see
	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
	 * sizes), but it has been found out experimentally, that this is not
	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
	 * as a minimum seem to work perfectly.
	 */
	if (s->chan_rx) {
		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
			port->fifosize / 2;
		dev_dbg(port->dev,
			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
			s->rx_timeout * 1000 / HZ, port->timeout);
		if (s->rx_timeout < msecs_to_jiffies(20))
			s->rx_timeout = msecs_to_jiffies(20);
	}
#endif

	if ((termios->c_cflag & CREAD) != 0)
		sci_start_rx(port);

	if (s->disable)
		s->disable(port);
}
1581 
sci_type(struct uart_port * port)1582 static const char *sci_type(struct uart_port *port)
1583 {
1584 	switch (port->type) {
1585 	case PORT_IRDA:
1586 		return "irda";
1587 	case PORT_SCI:
1588 		return "sci";
1589 	case PORT_SCIF:
1590 		return "scif";
1591 	case PORT_SCIFA:
1592 		return "scifa";
1593 	case PORT_SCIFB:
1594 		return "scifb";
1595 	}
1596 
1597 	return NULL;
1598 }
1599 
/* Size in bytes of the register window claimed/mapped for a port. */
static inline unsigned long sci_port_size(struct uart_port *port)
{
	/*
	 * Pick an arbitrary size that encapsulates all of the base
	 * registers by default. This can be optimized later, or derived
	 * from platform resource data at such a time that ports begin to
	 * behave more erratically.
	 */
	return 64;
}
1610 
sci_remap_port(struct uart_port * port)1611 static int sci_remap_port(struct uart_port *port)
1612 {
1613 	unsigned long size = sci_port_size(port);
1614 
1615 	/*
1616 	 * Nothing to do if there's already an established membase.
1617 	 */
1618 	if (port->membase)
1619 		return 0;
1620 
1621 	if (port->flags & UPF_IOREMAP) {
1622 		port->membase = ioremap_nocache(port->mapbase, size);
1623 		if (unlikely(!port->membase)) {
1624 			dev_err(port->dev, "can't remap port#%d\n", port->line);
1625 			return -ENXIO;
1626 		}
1627 	} else {
1628 		/*
1629 		 * For the simple (and majority of) cases where we don't
1630 		 * need to do any remapping, just cast the cookie
1631 		 * directly.
1632 		 */
1633 		port->membase = (void __iomem *)port->mapbase;
1634 	}
1635 
1636 	return 0;
1637 }
1638 
sci_release_port(struct uart_port * port)1639 static void sci_release_port(struct uart_port *port)
1640 {
1641 	if (port->flags & UPF_IOREMAP) {
1642 		iounmap(port->membase);
1643 		port->membase = NULL;
1644 	}
1645 
1646 	release_mem_region(port->mapbase, sci_port_size(port));
1647 }
1648 
sci_request_port(struct uart_port * port)1649 static int sci_request_port(struct uart_port *port)
1650 {
1651 	unsigned long size = sci_port_size(port);
1652 	struct resource *res;
1653 	int ret;
1654 
1655 	res = request_mem_region(port->mapbase, size, dev_name(port->dev));
1656 	if (unlikely(res == NULL))
1657 		return -EBUSY;
1658 
1659 	ret = sci_remap_port(port);
1660 	if (unlikely(ret != 0)) {
1661 		release_resource(res);
1662 		return ret;
1663 	}
1664 
1665 	return 0;
1666 }
1667 
sci_config_port(struct uart_port * port,int flags)1668 static void sci_config_port(struct uart_port *port, int flags)
1669 {
1670 	if (flags & UART_CONFIG_TYPE) {
1671 		struct sci_port *sport = to_sci_port(port);
1672 
1673 		port->type = sport->cfg->type;
1674 		sci_request_port(port);
1675 	}
1676 }
1677 
sci_verify_port(struct uart_port * port,struct serial_struct * ser)1678 static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
1679 {
1680 	struct sci_port *s = to_sci_port(port);
1681 
1682 	if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
1683 		return -EINVAL;
1684 	if (ser->baud_base < 2400)
1685 		/* No paper tape reader for Mitch.. */
1686 		return -EINVAL;
1687 
1688 	return 0;
1689 }
1690 
/* serial_core callbacks for all SCI/SCIF port variants. */
static struct uart_ops sci_uart_ops = {
	.tx_empty	= sci_tx_empty,
	.set_mctrl	= sci_set_mctrl,
	.get_mctrl	= sci_get_mctrl,
	.start_tx	= sci_start_tx,
	.stop_tx	= sci_stop_tx,
	.stop_rx	= sci_stop_rx,
	.enable_ms	= sci_enable_ms,
	.break_ctl	= sci_break_ctl,
	.startup	= sci_startup,
	.shutdown	= sci_shutdown,
	.set_termios	= sci_set_termios,
	.type		= sci_type,
	.release_port	= sci_release_port,
	.request_port	= sci_request_port,
	.config_port	= sci_config_port,
	.verify_port	= sci_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= sci_poll_get_char,
	.poll_put_char	= sci_poll_put_char,
#endif
};
1713 
/*
 * Initialize one sci_port from its platform data.
 * @dev: owning platform device, or NULL for the earlyprintk path (then
 *       clock lookup and dev pointer setup are skipped).
 * Returns 0 on success or a negative errno if the interface clock is
 * missing.
 */
static int __devinit sci_init_single(struct platform_device *dev,
				     struct sci_port *sci_port,
				     unsigned int index,
				     struct plat_sci_port *p)
{
	struct uart_port *port = &sci_port->port;

	port->ops	= &sci_uart_ops;
	port->iotype	= UPIO_MEM;
	port->line	= index;

	/* FIFO depth is fixed per port variant. */
	switch (p->type) {
	case PORT_SCIFB:
		port->fifosize = 256;
		break;
	case PORT_SCIFA:
		port->fifosize = 64;
		break;
	case PORT_SCIF:
		port->fifosize = 16;
		break;
	default:
		port->fifosize = 1;
		break;
	}

	if (dev) {
		/* Interface clock: try "sci_ick", fall back to the
		 * generic "peripheral_clk"; it is mandatory. */
		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
		if (IS_ERR(sci_port->iclk)) {
			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
			if (IS_ERR(sci_port->iclk)) {
				dev_err(&dev->dev, "can't get iclk\n");
				return PTR_ERR(sci_port->iclk);
			}
		}

		/*
		 * The function clock is optional, ignore it if we can't
		 * find it.
		 */
		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
		if (IS_ERR(sci_port->fclk))
			sci_port->fclk = NULL;

		sci_port->enable = sci_clk_enable;
		sci_port->disable = sci_clk_disable;
		port->dev = &dev->dev;
	}

	sci_port->break_timer.data = (unsigned long)sci_port;
	sci_port->break_timer.function = sci_break_timer;
	init_timer(&sci_port->break_timer);

	sci_port->cfg		= p;

	port->mapbase		= p->mapbase;
	port->type		= p->type;
	port->flags		= p->flags;

	/*
	 * The UART port needs an IRQ value, so we peg this to the TX IRQ
	 * for the multi-IRQ ports, which is where we are primarily
	 * concerned with the shutdown path synchronization.
	 *
	 * For the muxed case there's nothing more to do.
	 */
	port->irq		= p->irqs[SCIx_TXI_IRQ];

	if (p->dma_dev)
		dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
			p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);

	return 0;
}
1788 
1789 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* uart_console_write() helper: emit one character by busy-polling. */
static void serial_console_putchar(struct uart_port *port, int ch)
{
	sci_poll_put_char(port, ch);
}
1794 
1795 /*
1796  *	Print a string to the serial port trying not to disturb
1797  *	any possible real use of the port...
1798  */
/*
 *	Print a string to the serial port trying not to disturb
 *	any possible real use of the port...
 */
static void serial_console_write(struct console *co, const char *s,
				 unsigned count)
{
	struct sci_port *sci_port = &sci_ports[co->index];
	struct uart_port *port = &sci_port->port;
	unsigned short bits;

	/* Make sure the port clock is running while we poll. */
	if (sci_port->enable)
		sci_port->enable(port);

	uart_console_write(port, s, count, serial_console_putchar);

	/* wait until fifo is empty and last bit has been transmitted */
	bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
	while ((sci_in(port, SCxSR) & bits) != bits)
		cpu_relax();

	if (sci_port->disable)
		sci_port->disable(port);
}
1819 
/*
 * console.setup: validate the console index, map the port registers,
 * and apply the "baud,parity,bits,flow" options (defaulting to
 * 115200n8).  Returns 0 or a negative errno.
 */
static int __devinit serial_console_setup(struct console *co, char *options)
{
	struct sci_port *sci_port;
	struct uart_port *port;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Refuse to handle any bogus ports.
	 */
	if (co->index < 0 || co->index >= SCI_NPORTS)
		return -ENODEV;

	sci_port = &sci_ports[co->index];
	port = &sci_port->port;

	/*
	 * Refuse to handle uninitialized ports.
	 */
	if (!port->ops)
		return -ENODEV;

	ret = sci_remap_port(port);
	if (unlikely(ret != 0))
		return ret;

	if (sci_port->enable)
		sci_port->enable(port);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	ret = uart_set_options(port, co, baud, parity, bits, flow);
#if defined(__H8300H__) || defined(__H8300S__)
	/* disable rx interrupt */
	if (ret == 0)
		sci_stop_rx(port);
#endif
	/* TODO: disable clock */
	return ret;
}
1864 
/* The regular /dev/ttySC console. */
static struct console serial_console = {
	.name		= "ttySC",
	.device		= uart_console_device,
	.write		= serial_console_write,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &sci_uart_driver,
};
1874 
/* Boot-time (earlyprintk) console; .data doubles as the registered
 * flag in sci_probe_earlyprintk(). */
static struct console early_serial_console = {
	.name           = "early_ttySC",
	.write          = serial_console_write,
	.flags          = CON_PRINTBUFFER,
	.index		= -1,
};

/* Command-line options buffer for the early console. */
static char early_serial_buf[32];
1883 
sci_probe_earlyprintk(struct platform_device * pdev)1884 static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
1885 {
1886 	struct plat_sci_port *cfg = pdev->dev.platform_data;
1887 
1888 	if (early_serial_console.data)
1889 		return -EEXIST;
1890 
1891 	early_serial_console.index = pdev->id;
1892 
1893 	sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
1894 
1895 	serial_console_setup(&early_serial_console, early_serial_buf);
1896 
1897 	if (!strstr(early_serial_buf, "keep"))
1898 		early_serial_console.flags |= CON_BOOT;
1899 
1900 	register_console(&early_serial_console);
1901 	return 0;
1902 }
1903 
1904 #define SCI_CONSOLE	(&serial_console)
1905 
1906 #else
/* Console support compiled out: early probing always fails. */
static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
{
	return -EINVAL;
}
1911 
1912 #define SCI_CONSOLE	NULL
1913 
1914 #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
1915 
/* One-shot boot banner; freed with the init section. */
static char banner[] __initdata =
	KERN_INFO "SuperH SCI(F) driver initialized\n";
1918 
/* serial_core driver instance backing all /dev/ttySC* ports. */
static struct uart_driver sci_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "sci",
	.dev_name	= "ttySC",
	.major		= SCI_MAJOR,
	.minor		= SCI_MINOR_START,
	.nr		= SCI_NPORTS,
	.cons		= SCI_CONSOLE,
};
1928 
/*
 * Platform remove: unregister the cpufreq notifier, detach the port
 * from serial_core, and drop the clock references.  Also used as the
 * unwind path of sci_probe().
 */
static int sci_remove(struct platform_device *dev)
{
	struct sci_port *port = platform_get_drvdata(dev);

	cpufreq_unregister_notifier(&port->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);

	uart_remove_one_port(&sci_uart_driver, &port->port);

	/* clk_put(NULL) is safe for the optional function clock. */
	clk_put(port->iclk);
	clk_put(port->fclk);

	return 0;
}
1943 
/*
 * Initialize and register one port with serial_core.  An out-of-range
 * index is deliberately non-fatal (returns 0 with a notice) so the
 * remaining ports still probe.
 */
static int __devinit sci_probe_single(struct platform_device *dev,
				      unsigned int index,
				      struct plat_sci_port *p,
				      struct sci_port *sciport)
{
	int ret;

	/* Sanity check */
	if (unlikely(index >= SCI_NPORTS)) {
		dev_notice(&dev->dev, "Attempting to register port "
			   "%d when only %d are available.\n",
			   index+1, SCI_NPORTS);
		dev_notice(&dev->dev, "Consider bumping "
			   "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
		return 0;
	}

	ret = sci_init_single(dev, sciport, index, p);
	if (ret)
		return ret;

	return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
1967 
/*
 * Platform probe: register the port with serial_core and hook up the
 * cpufreq transition notifier.  Early platform devices are diverted to
 * the earlyprintk path before any device state is set up.
 */
static int __devinit sci_probe(struct platform_device *dev)
{
	struct plat_sci_port *p = dev->dev.platform_data;
	struct sci_port *sp = &sci_ports[dev->id];
	int ret;

	/*
	 * If we've come here via earlyprintk initialization, head off to
	 * the special early probe. We don't have sufficient device state
	 * to make it beyond this yet.
	 */
	if (is_early_platform_device(dev))
		return sci_probe_earlyprintk(dev);

	platform_set_drvdata(dev, sp);

	ret = sci_probe_single(dev, dev->id, p, sp);
	if (ret)
		goto err_unreg;

	sp->freq_transition.notifier_call = sci_notifier;

	ret = cpufreq_register_notifier(&sp->freq_transition,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (unlikely(ret < 0))
		goto err_unreg;

#ifdef CONFIG_SH_STANDARD_BIOS
	sh_bios_gdb_detach();
#endif

	return 0;

err_unreg:
	/*
	 * NOTE(review): sci_remove() unconditionally unregisters the
	 * cpufreq notifier and removes the uart port, even on the path
	 * where sci_probe_single() failed and neither was set up —
	 * looks over-eager; verify the unwind is safe in both cases.
	 */
	sci_remove(dev);
	return ret;
}
2005 
sci_suspend(struct device * dev)2006 static int sci_suspend(struct device *dev)
2007 {
2008 	struct sci_port *sport = dev_get_drvdata(dev);
2009 
2010 	if (sport)
2011 		uart_suspend_port(&sci_uart_driver, &sport->port);
2012 
2013 	return 0;
2014 }
2015 
sci_resume(struct device * dev)2016 static int sci_resume(struct device *dev)
2017 {
2018 	struct sci_port *sport = dev_get_drvdata(dev);
2019 
2020 	if (sport)
2021 		uart_resume_port(&sci_uart_driver, &sport->port);
2022 
2023 	return 0;
2024 }
2025 
/* System sleep callbacks. */
static const struct dev_pm_ops sci_dev_pm_ops = {
	.suspend	= sci_suspend,
	.resume		= sci_resume,
};
2030 
/* Platform glue; matches devices named "sh-sci". */
static struct platform_driver sci_driver = {
	.probe		= sci_probe,
	.remove		= sci_remove,
	.driver		= {
		.name	= "sh-sci",
		.owner	= THIS_MODULE,
		.pm	= &sci_dev_pm_ops,
	},
};
2040 
sci_init(void)2041 static int __init sci_init(void)
2042 {
2043 	int ret;
2044 
2045 	printk(banner);
2046 
2047 	ret = uart_register_driver(&sci_uart_driver);
2048 	if (likely(ret == 0)) {
2049 		ret = platform_driver_register(&sci_driver);
2050 		if (unlikely(ret))
2051 			uart_unregister_driver(&sci_uart_driver);
2052 	}
2053 
2054 	return ret;
2055 }
2056 
/* Module exit: unregister in reverse order of sci_init(). */
static void __exit sci_exit(void)
{
	platform_driver_unregister(&sci_driver);
	uart_unregister_driver(&sci_uart_driver);
}
2062 
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
/* Make the driver available to the early platform ("earlyprintk=")
 * machinery, with early_serial_buf receiving the option string. */
early_platform_init_buffer("earlyprintk", &sci_driver,
			   early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
module_init(sci_init);
module_exit(sci_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
2072