1 /*
2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
3 *
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
7 *
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
16 */
17
18 /* #define VERBOSE_DEBUG */
19
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/delay.h>
25 #include <linux/tty.h>
26 #include <linux/tty_flip.h>
27 #include <linux/slab.h>
28 #include <linux/export.h>
29
30 #include "u_serial.h"
31
32
33 /*
34 * This component encapsulates the TTY layer glue needed to provide basic
35 * "serial port" functionality through the USB gadget stack. Each such
36 * port is exposed through a /dev/ttyGS* node.
37 *
38 * After initialization (gserial_setup), these TTY port devices stay
39 * available until they are removed (gserial_cleanup). Each one may be
40 * connected to a USB function (gserial_connect), or disconnected (with
41 * gserial_disconnect) when the USB host issues a config change event.
42 * Data can only flow when the port is connected to the host.
43 *
44 * A given TTY port can be made available in multiple configurations.
45 * For example, each one might expose a ttyGS0 node which provides a
46 * login application. In one case that might use CDC ACM interface 0,
47 * while another configuration might use interface 3 for that. The
48 * work to handle that (including descriptor management) is not part
49 * of this component.
50 *
51 * Configurations may expose more than one TTY port. For example, if
52 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
53 * for a telephone or fax link. And ttyGS2 might be something that just
54 * needs a simple byte stream interface for some messaging protocol that
55 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
56 */
57
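/*
 * A rough lifecycle sketch as seen from a gadget/function driver (a
 * hedged example: "gadget" and "my_port" stand in for the caller's own
 * variables, while the gserial_* calls are the ones implemented below):
 *
 *	status = gserial_setup(gadget, 1);	creates /dev/ttyGS0
 *	status = gserial_connect(&my_port, 0);	config active, data may flow
 *	gserial_disconnect(&my_port);		config change or USB reset
 *	gserial_cleanup();			module unload, nodes removed
 */
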
58 #define PREFIX "ttyGS"
59
60 /*
61 * gserial is the lifecycle interface, used by USB functions
62 * gs_port is the I/O nexus, used by the tty driver
63 * tty_struct links to the tty/filesystem framework
64 *
65 * gserial <---> gs_port ... links will be null when the USB link is
66 * inactive; managed by gserial_{connect,disconnect}(). each gserial
67 * instance can wrap its own USB control protocol.
68 * gserial->ioport == usb_ep->driver_data ... gs_port
69 * gs_port->port_usb ... gserial
70 *
71 * gs_port <---> tty_struct ... links will be null when the TTY file
72 * isn't opened; managed by gs_open()/gs_close()
73 * gs_port->port_tty ... tty_struct
74 * tty_struct->driver_data ... gs_port
75 */
76
77 /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
78 * next layer of buffering. For TX that's a circular buffer; for RX
79 * consider it a NOP. A third layer is provided by the TTY code.
80 */
81 #define QUEUE_SIZE 16
82 #define WRITE_BUF_SIZE 8192 /* TX only */
83
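/*
 * Worked example of that sizing: a high speed bulk endpoint uses
 * 512 byte packets, so up to QUEUE_SIZE * 512 = 8 KB can sit in queued
 * TX requests, on top of the 8 KB circular buffer, before the TTY
 * layer's own buffering is even reached.
 */
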
84 /* circular buffer */
85 struct gs_buf {
86 unsigned buf_size;
87 char *buf_buf;
88 char *buf_get;
89 char *buf_put;
90 };
91
92 /*
93 * The port structure holds info for each port, one for each minor number
94 * (and thus for each /dev/ node).
95 */
96 struct gs_port {
97 spinlock_t port_lock; /* guard port_* access */
98
99 struct gserial *port_usb;
100 struct tty_struct *port_tty;
101
102 unsigned open_count;
103 bool openclose; /* open/close in progress */
104 u8 port_num;
105
106 wait_queue_head_t close_wait; /* wait for last close */
107
108 struct list_head read_pool;
109 int read_started;
110 int read_allocated;
111 struct list_head read_queue;
112 unsigned n_read;
113 struct tasklet_struct push;
114
115 struct list_head write_pool;
116 int write_started;
117 int write_allocated;
118 struct gs_buf port_write_buf;
119 wait_queue_head_t drain_wait; /* wait while writes drain */
120
121 /* REVISIT this state ... */
122 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
123 };
124
125 /* increase N_PORTS if you need more */
126 #define N_PORTS 4
127 static struct portmaster {
128 struct mutex lock; /* protect open/close */
129 struct gs_port *port;
130 } ports[N_PORTS];
131 static unsigned n_ports;
132
133 #define GS_CLOSE_TIMEOUT 15 /* seconds */
134
135
136
137 #ifdef VERBOSE_DEBUG
138 #define pr_vdebug(fmt, arg...) \
139 pr_debug(fmt, ##arg)
140 #else
141 #define pr_vdebug(fmt, arg...) \
142 ({ if (0) pr_debug(fmt, ##arg); })
143 #endif
144
145 /*-------------------------------------------------------------------------*/
146
147 /* Circular Buffer */
148
149 /*
150 * gs_buf_alloc
151 *
152 * Allocate a circular buffer and all associated memory.
153 */
154 static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
155 {
156 gb->buf_buf = kmalloc(size, GFP_KERNEL);
157 if (gb->buf_buf == NULL)
158 return -ENOMEM;
159
160 gb->buf_size = size;
161 gb->buf_put = gb->buf_buf;
162 gb->buf_get = gb->buf_buf;
163
164 return 0;
165 }
166
167 /*
168 * gs_buf_free
169 *
170 * Free the buffer and all associated memory.
171 */
172 static void gs_buf_free(struct gs_buf *gb)
173 {
174 kfree(gb->buf_buf);
175 gb->buf_buf = NULL;
176 }
177
178 /*
179 * gs_buf_clear
180 *
181 * Clear out all data in the circular buffer.
182 */
183 static void gs_buf_clear(struct gs_buf *gb)
184 {
185 gb->buf_get = gb->buf_put;
186 /* equivalent to a get of all data available */
187 }
188
189 /*
190 * gs_buf_data_avail
191 *
192 * Return the number of bytes of data written into the circular
193 * buffer.
194 */
195 static unsigned gs_buf_data_avail(struct gs_buf *gb)
196 {
197 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
198 }
199
200 /*
201 * gs_buf_space_avail
202 *
203 * Return the number of bytes of space available in the circular
204 * buffer.
205 */
206 static unsigned gs_buf_space_avail(struct gs_buf *gb)
207 {
208 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
209 }
210
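/*
 * Worked example of the modular arithmetic above: with buf_size = 8,
 * buf_put at offset 5 and buf_get at offset 2,
 *
 *	data_avail  = (8 + 5 - 2) % 8 = 3
 *	space_avail = (8 + 2 - 5 - 1) % 8 = 4
 *
 * One byte always stays unused so that a full buffer (put == get)
 * cannot be confused with an empty one.
 */
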
211 /*
212 * gs_buf_put
213 *
214 * Copy data from a user buffer and put it into the circular buffer.
215 * Restrict to the amount of space available.
216 *
217 * Return the number of bytes copied.
218 */
219 static unsigned
220 gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
221 {
222 unsigned len;
223
224 len = gs_buf_space_avail(gb);
225 if (count > len)
226 count = len;
227
228 if (count == 0)
229 return 0;
230
231 len = gb->buf_buf + gb->buf_size - gb->buf_put;
232 if (count > len) {
233 memcpy(gb->buf_put, buf, len);
234 memcpy(gb->buf_buf, buf+len, count - len);
235 gb->buf_put = gb->buf_buf + count - len;
236 } else {
237 memcpy(gb->buf_put, buf, count);
238 if (count < len)
239 gb->buf_put += count;
240 else /* count == len */
241 gb->buf_put = gb->buf_buf;
242 }
243
244 return count;
245 }
246
247 /*
248 * gs_buf_get
249 *
250 * Get data from the circular buffer and copy to the given buffer.
251 * Restrict to the amount of data available.
252 *
253 * Return the number of bytes copied.
254 */
255 static unsigned
256 gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
257 {
258 unsigned len;
259
260 len = gs_buf_data_avail(gb);
261 if (count > len)
262 count = len;
263
264 if (count == 0)
265 return 0;
266
267 len = gb->buf_buf + gb->buf_size - gb->buf_get;
268 if (count > len) {
269 memcpy(buf, gb->buf_get, len);
270 memcpy(buf+len, gb->buf_buf, count - len);
271 gb->buf_get = gb->buf_buf + count - len;
272 } else {
273 memcpy(buf, gb->buf_get, count);
274 if (count < len)
275 gb->buf_get += count;
276 else /* count == len */
277 gb->buf_get = gb->buf_buf;
278 }
279
280 return count;
281 }
282
283 /*-------------------------------------------------------------------------*/
284
285 /* I/O glue between TTY (upper) and USB function (lower) driver layers */
286
287 /*
288 * gs_alloc_req
289 *
290 * Allocate a usb_request and its buffer. Returns a pointer to the
291 * usb_request or NULL if there is an error.
292 */
293 struct usb_request *
294 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
295 {
296 struct usb_request *req;
297
298 req = usb_ep_alloc_request(ep, kmalloc_flags);
299
300 if (req != NULL) {
301 req->length = len;
302 req->buf = kmalloc(len, kmalloc_flags);
303 if (req->buf == NULL) {
304 usb_ep_free_request(ep, req);
305 return NULL;
306 }
307 }
308
309 return req;
310 }
311
312 /*
313 * gs_free_req
314 *
315 * Free a usb_request and its buffer.
316 */
317 void gs_free_req(struct usb_ep *ep, struct usb_request *req)
318 {
319 kfree(req->buf);
320 usb_ep_free_request(ep, req);
321 }
322
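/*
 * Typical use by a function driver (a hedged sketch; "notify_ep" and
 * "my_notify_complete" are hypothetical names, not defined in this file):
 *
 *	req = gs_alloc_req(notify_ep, notify_ep->maxpacket, GFP_ATOMIC);
 *	if (!req)
 *		return -ENOMEM;
 *	req->complete = my_notify_complete;
 *	...
 *	gs_free_req(notify_ep, req);
 */
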
323 /*
324 * gs_send_packet
325 *
326 * If there is data to send, a packet is built in the given
327 * buffer and the size is returned. If there is no data to
328 * send, 0 is returned.
329 *
330 * Called with port_lock held.
331 */
332 static unsigned
333 gs_send_packet(struct gs_port *port, char *packet, unsigned size)
334 {
335 unsigned len;
336
337 len = gs_buf_data_avail(&port->port_write_buf);
338 if (len < size)
339 size = len;
340 if (size != 0)
341 size = gs_buf_get(&port->port_write_buf, packet, size);
342 return size;
343 }
344
345 /*
346 * gs_start_tx
347 *
348 * This function finds available write requests, calls
349 * gs_send_packet to fill these packets with data, and
350 * continues until either there are no more write requests
351 * available or no more data to send. This function is
352 * run whenever data arrives or write requests are available.
353 *
354 * Context: caller owns port_lock; port_usb is non-null.
355 */
356 static int gs_start_tx(struct gs_port *port)
357 /*
358 __releases(&port->port_lock)
359 __acquires(&port->port_lock)
360 */
361 {
362 struct list_head *pool = &port->write_pool;
363 struct usb_ep *in = port->port_usb->in;
364 int status = 0;
365 bool do_tty_wake = false;
366
367 while (!list_empty(pool)) {
368 struct usb_request *req;
369 int len;
370
371 if (port->write_started >= QUEUE_SIZE)
372 break;
373
374 req = list_entry(pool->next, struct usb_request, list);
375 len = gs_send_packet(port, req->buf, in->maxpacket);
376 if (len == 0) {
377 wake_up_interruptible(&port->drain_wait);
378 break;
379 }
380 do_tty_wake = true;
381
382 req->length = len;
383 list_del(&req->list);
384 req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
385
386 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
387 port->port_num, len, *((u8 *)req->buf),
388 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
389
390 /* Drop lock while we call out of driver; completions
391 * could be issued while we do so. Disconnection may
392 * happen too; maybe immediately before we queue this!
393 *
394 * NOTE that we may keep sending data for a while after
395 * the TTY closed (port->port_tty is NULL).
396 */
397 spin_unlock(&port->port_lock);
398 status = usb_ep_queue(in, req, GFP_ATOMIC);
399 spin_lock(&port->port_lock);
400
401 if (status) {
402 pr_debug("%s: %s %s err %d\n",
403 __func__, "queue", in->name, status);
404 list_add(&req->list, pool);
405 break;
406 }
407
408 port->write_started++;
409
410 /* abort immediately after disconnect */
411 if (!port->port_usb)
412 break;
413 }
414
415 if (do_tty_wake && port->port_tty)
416 tty_wakeup(port->port_tty);
417 return status;
418 }
419
420 /*
421 * Context: caller owns port_lock, and port_usb is set
422 */
423 static unsigned gs_start_rx(struct gs_port *port)
424 /*
425 __releases(&port->port_lock)
426 __acquires(&port->port_lock)
427 */
428 {
429 struct list_head *pool = &port->read_pool;
430 struct usb_ep *out = port->port_usb->out;
431
432 while (!list_empty(pool)) {
433 struct usb_request *req;
434 int status;
435 struct tty_struct *tty;
436
437 /* no more rx if closed */
438 tty = port->port_tty;
439 if (!tty)
440 break;
441
442 if (port->read_started >= QUEUE_SIZE)
443 break;
444
445 req = list_entry(pool->next, struct usb_request, list);
446 list_del(&req->list);
447 req->length = out->maxpacket;
448
449 /* drop lock while we call out; the controller driver
450 * may need to call us back (e.g. for disconnect)
451 */
452 spin_unlock(&port->port_lock);
453 status = usb_ep_queue(out, req, GFP_ATOMIC);
454 spin_lock(&port->port_lock);
455
456 if (status) {
457 pr_debug("%s: %s %s err %d\n",
458 __func__, "queue", out->name, status);
459 list_add(&req->list, pool);
460 break;
461 }
462 port->read_started++;
463
464 /* abort immediately after disconnect */
465 if (!port->port_usb)
466 break;
467 }
468 return port->read_started;
469 }
470
471 /*
472 * RX tasklet takes data out of the RX queue and hands it up to the TTY
473 * layer until it refuses to take any more data (or is throttled back).
474 * Then it issues reads for any further data.
475 *
476 * If the RX queue becomes full enough that no usb_request is queued,
477 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
478 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
479 * can be buffered before the TTY layer's buffers (currently 64 KB).
480 */
481 static void gs_rx_push(unsigned long _port)
482 {
483 struct gs_port *port = (void *)_port;
484 struct tty_struct *tty;
485 struct list_head *queue = &port->read_queue;
486 bool disconnect = false;
487 bool do_push = false;
488
489 /* hand any queued data to the tty */
490 spin_lock_irq(&port->port_lock);
491 tty = port->port_tty;
492 while (!list_empty(queue)) {
493 struct usb_request *req;
494
495 req = list_first_entry(queue, struct usb_request, list);
496
497 /* discard data if tty was closed */
498 if (!tty)
499 goto recycle;
500
501 /* leave data queued if tty was rx throttled */
502 if (test_bit(TTY_THROTTLED, &tty->flags))
503 break;
504
505 switch (req->status) {
506 case -ESHUTDOWN:
507 disconnect = true;
508 pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
509 break;
510
511 default:
512 /* presumably a transient fault */
513 pr_warning(PREFIX "%d: unexpected RX status %d\n",
514 port->port_num, req->status);
515 /* FALLTHROUGH */
516 case 0:
517 /* normal completion */
518 break;
519 }
520
521 /* push data to (open) tty */
522 if (req->actual) {
523 char *packet = req->buf;
524 unsigned size = req->actual;
525 unsigned n;
526 int count;
527
528 /* we may have pushed part of this packet already... */
529 n = port->n_read;
530 if (n) {
531 packet += n;
532 size -= n;
533 }
534
535 count = tty_insert_flip_string(tty, packet, size);
536 if (count)
537 do_push = true;
538 if (count != size) {
539 /* stop pushing; TTY layer can't handle more */
540 port->n_read += count;
541 pr_vdebug(PREFIX "%d: rx block %d/%d\n",
542 port->port_num,
543 count, req->actual);
544 break;
545 }
546 port->n_read = 0;
547 }
548 recycle:
549 list_move(&req->list, &port->read_pool);
550 port->read_started--;
551 }
552
553 /* Push from tty to ldisc; without low_latency set this is handled by
554 * a workqueue, so we won't get callbacks and can hold port_lock
555 */
556 if (tty && do_push)
557 tty_flip_buffer_push(tty);
558
559
560 /* We want our data queue to become empty ASAP, keeping data
561 * in the tty and ldisc (not here). If we couldn't push any
562 * this time around, there may be trouble unless there's an
563 * implicit tty_unthrottle() call on its way...
564 *
565 * REVISIT we should probably add a timer to keep the tasklet
566 * from starving ... but it's not clear that case ever happens.
567 */
568 if (!list_empty(queue) && tty) {
569 if (!test_bit(TTY_THROTTLED, &tty->flags)) {
570 if (do_push)
571 tasklet_schedule(&port->push);
572 else
573 pr_warning(PREFIX "%d: RX not scheduled?\n",
574 port->port_num);
575 }
576 }
577
578 /* If we're still connected, refill the USB RX queue. */
579 if (!disconnect && port->port_usb)
580 gs_start_rx(port);
581
582 spin_unlock_irq(&port->port_lock);
583 }
584
585 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
586 {
587 struct gs_port *port = ep->driver_data;
588
589 /* Queue all received data until the tty layer is ready for it. */
590 spin_lock(&port->port_lock);
591 list_add_tail(&req->list, &port->read_queue);
592 tasklet_schedule(&port->push);
593 spin_unlock(&port->port_lock);
594 }
595
596 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
597 {
598 struct gs_port *port = ep->driver_data;
599
600 spin_lock(&port->port_lock);
601 list_add(&req->list, &port->write_pool);
602 port->write_started--;
603
604 switch (req->status) {
605 default:
606 /* presumably a transient fault */
607 pr_warning("%s: unexpected %s status %d\n",
608 __func__, ep->name, req->status);
609 /* FALL THROUGH */
610 case 0:
611 /* normal completion */
612 gs_start_tx(port);
613 break;
614
615 case -ESHUTDOWN:
616 /* disconnect */
617 pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
618 break;
619 }
620
621 spin_unlock(&port->port_lock);
622 }
623
624 static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 int *allocated)
626 {
627 struct usb_request *req;
628
629 while (!list_empty(head)) {
630 req = list_entry(head->next, struct usb_request, list);
631 list_del(&req->list);
632 gs_free_req(ep, req);
633 if (allocated)
634 (*allocated)--;
635 }
636 }
637
638 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
639 void (*fn)(struct usb_ep *, struct usb_request *),
640 int *allocated)
641 {
642 int i;
643 struct usb_request *req;
644 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
645
646 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
647 * do quite that many this time, don't fail ... we just won't
648 * be as speedy as we might otherwise be.
649 */
650 for (i = 0; i < n; i++) {
651 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
652 if (!req)
653 return list_empty(head) ? -ENOMEM : 0;
654 req->complete = fn;
655 list_add_tail(&req->list, head);
656 if (allocated)
657 (*allocated)++;
658 }
659 return 0;
660 }
661
662 /**
663 * gs_start_io - start USB I/O streams
664 * @port: encapsulates endpoints to use
665 * Context: holding port_lock; port_tty and port_usb are non-null
666 *
667 * We only start I/O when something is connected to both sides of
668 * this port. If nothing is listening on the host side, we may
669 * be pointlessly filling up our TX buffers and FIFO.
670 */
671 static int gs_start_io(struct gs_port *port)
672 {
673 struct list_head *head = &port->read_pool;
674 struct usb_ep *ep = port->port_usb->out;
675 int status;
676 unsigned started;
677
678 /* Allocate RX and TX I/O buffers. We can't easily do this much
679 * earlier (with GFP_KERNEL) because the requests are coupled to
680 * endpoints, as are the packet sizes we'll be using. Different
681 * configurations may use different endpoints with a given port;
682 * and high speed vs full speed changes packet sizes too.
683 */
684 status = gs_alloc_requests(ep, head, gs_read_complete,
685 &port->read_allocated);
686 if (status)
687 return status;
688
689 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
690 gs_write_complete, &port->write_allocated);
691 if (status) {
692 gs_free_requests(ep, head, &port->read_allocated);
693 return status;
694 }
695
696 /* queue read requests */
697 port->n_read = 0;
698 started = gs_start_rx(port);
699
700 /* unblock any pending writes into our circular buffer */
701 if (started) {
702 tty_wakeup(port->port_tty);
703 } else {
704 gs_free_requests(ep, head, &port->read_allocated);
705 gs_free_requests(port->port_usb->in, &port->write_pool,
706 &port->write_allocated);
707 status = -EIO;
708 }
709
710 return status;
711 }
712
713 /*-------------------------------------------------------------------------*/
714
715 /* TTY Driver */
716
717 /*
718 * gs_open sets up the link between a gs_port and its associated TTY.
719 * That link is broken *only* by TTY close(), and all driver methods
720 * know that.
721 */
722 static int gs_open(struct tty_struct *tty, struct file *file)
723 {
724 int port_num = tty->index;
725 struct gs_port *port;
726 int status;
727
728 do {
729 mutex_lock(&ports[port_num].lock);
730 port = ports[port_num].port;
731 if (!port)
732 status = -ENODEV;
733 else {
734 spin_lock_irq(&port->port_lock);
735
736 /* already open? Great. */
737 if (port->open_count) {
738 status = 0;
739 port->open_count++;
740
741 /* currently opening/closing? wait ... */
742 } else if (port->openclose) {
743 status = -EBUSY;
744
745 /* ... else we do the work */
746 } else {
747 status = -EAGAIN;
748 port->openclose = true;
749 }
750 spin_unlock_irq(&port->port_lock);
751 }
752 mutex_unlock(&ports[port_num].lock);
753
754 switch (status) {
755 default:
756 /* fully handled */
757 return status;
758 case -EAGAIN:
759 /* must do the work */
760 break;
761 case -EBUSY:
762 /* wait for EAGAIN task to finish */
763 msleep(1);
764 /* REVISIT could have a waitchannel here, if
765 * concurrent open performance is important
766 */
767 break;
768 }
769 } while (status != -EAGAIN);
770
771 /* Do the "real open" */
772 spin_lock_irq(&port->port_lock);
773
774 /* allocate circular buffer on first open */
775 if (port->port_write_buf.buf_buf == NULL) {
776
777 spin_unlock_irq(&port->port_lock);
778 status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
779 spin_lock_irq(&port->port_lock);
780
781 if (status) {
782 pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
783 port->port_num, tty, file);
784 port->openclose = false;
785 goto exit_unlock_port;
786 }
787 }
788
789 /* REVISIT if REMOVED (ports[].port NULL), abort the open
790 * to let rmmod work faster (but this way isn't wrong).
791 */
792
793 /* REVISIT maybe wait for "carrier detect" */
794
795 tty->driver_data = port;
796 port->port_tty = tty;
797
798 port->open_count = 1;
799 port->openclose = false;
800
801 /* if connected, start the I/O stream */
802 if (port->port_usb) {
803 struct gserial *gser = port->port_usb;
804
805 pr_debug("gs_open: start ttyGS%d\n", port->port_num);
806 gs_start_io(port);
807
808 if (gser->connect)
809 gser->connect(gser);
810 }
811
812 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
813
814 status = 0;
815
816 exit_unlock_port:
817 spin_unlock_irq(&port->port_lock);
818 return status;
819 }
820
821 static int gs_writes_finished(struct gs_port *p)
822 {
823 int cond;
824
825 /* return true on disconnect or empty buffer */
826 spin_lock_irq(&p->port_lock);
827 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
828 spin_unlock_irq(&p->port_lock);
829
830 return cond;
831 }
832
833 static void gs_close(struct tty_struct *tty, struct file *file)
834 {
835 struct gs_port *port = tty->driver_data;
836 struct gserial *gser;
837
838 spin_lock_irq(&port->port_lock);
839
840 if (port->open_count != 1) {
841 if (port->open_count == 0)
842 WARN_ON(1);
843 else
844 --port->open_count;
845 goto exit;
846 }
847
848 pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
849
850 /* mark port as closing but in use; we can drop port lock
851 * and sleep if necessary
852 */
853 port->openclose = true;
854 port->open_count = 0;
855
856 gser = port->port_usb;
857 if (gser && gser->disconnect)
858 gser->disconnect(gser);
859
860 /* wait for circular write buffer to drain, disconnect, or at
861 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
862 */
863 if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
864 spin_unlock_irq(&port->port_lock);
865 wait_event_interruptible_timeout(port->drain_wait,
866 gs_writes_finished(port),
867 GS_CLOSE_TIMEOUT * HZ);
868 spin_lock_irq(&port->port_lock);
869 gser = port->port_usb;
870 }
871
872 /* Iff we're disconnected, there can be no I/O in flight so it's
873 * ok to free the circular buffer; else just scrub it. And don't
874 * let the push tasklet fire again until we're re-opened.
875 */
876 if (gser == NULL)
877 gs_buf_free(&port->port_write_buf);
878 else
879 gs_buf_clear(&port->port_write_buf);
880
881 tty->driver_data = NULL;
882 port->port_tty = NULL;
883
884 port->openclose = false;
885
886 pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
887 port->port_num, tty, file);
888
889 wake_up_interruptible(&port->close_wait);
890 exit:
891 spin_unlock_irq(&port->port_lock);
892 }
893
894 static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
895 {
896 struct gs_port *port = tty->driver_data;
897 unsigned long flags;
898 int status;
899
900 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
901 port->port_num, tty, count);
902
903 spin_lock_irqsave(&port->port_lock, flags);
904 if (count)
905 count = gs_buf_put(&port->port_write_buf, buf, count);
906 /* treat count == 0 as flush_chars() */
907 if (port->port_usb)
908 status = gs_start_tx(port);
909 spin_unlock_irqrestore(&port->port_lock, flags);
910
911 return count;
912 }
913
914 static int gs_put_char(struct tty_struct *tty, unsigned char ch)
915 {
916 struct gs_port *port = tty->driver_data;
917 unsigned long flags;
918 int status;
919
920 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
921 port->port_num, tty, ch, __builtin_return_address(0));
922
923 spin_lock_irqsave(&port->port_lock, flags);
924 status = gs_buf_put(&port->port_write_buf, &ch, 1);
925 spin_unlock_irqrestore(&port->port_lock, flags);
926
927 return status;
928 }
929
930 static void gs_flush_chars(struct tty_struct *tty)
931 {
932 struct gs_port *port = tty->driver_data;
933 unsigned long flags;
934
935 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
936
937 spin_lock_irqsave(&port->port_lock, flags);
938 if (port->port_usb)
939 gs_start_tx(port);
940 spin_unlock_irqrestore(&port->port_lock, flags);
941 }
942
943 static int gs_write_room(struct tty_struct *tty)
944 {
945 struct gs_port *port = tty->driver_data;
946 unsigned long flags;
947 int room = 0;
948
949 spin_lock_irqsave(&port->port_lock, flags);
950 if (port->port_usb)
951 room = gs_buf_space_avail(&port->port_write_buf);
952 spin_unlock_irqrestore(&port->port_lock, flags);
953
954 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
955 port->port_num, tty, room);
956
957 return room;
958 }
959
960 static int gs_chars_in_buffer(struct tty_struct *tty)
961 {
962 struct gs_port *port = tty->driver_data;
963 unsigned long flags;
964 int chars = 0;
965
966 spin_lock_irqsave(&port->port_lock, flags);
967 chars = gs_buf_data_avail(&port->port_write_buf);
968 spin_unlock_irqrestore(&port->port_lock, flags);
969
970 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
971 port->port_num, tty, chars);
972
973 return chars;
974 }
975
976 /* undo side effects of setting TTY_THROTTLED */
977 static void gs_unthrottle(struct tty_struct *tty)
978 {
979 struct gs_port *port = tty->driver_data;
980 unsigned long flags;
981
982 spin_lock_irqsave(&port->port_lock, flags);
983 if (port->port_usb) {
984 /* Kickstart read queue processing. We don't do xon/xoff,
985 * rts/cts, or other handshaking with the host, but if the
986 * read queue backs up enough we'll be NAKing OUT packets.
987 */
988 tasklet_schedule(&port->push);
989 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
990 }
991 spin_unlock_irqrestore(&port->port_lock, flags);
992 }
993
994 static int gs_break_ctl(struct tty_struct *tty, int duration)
995 {
996 struct gs_port *port = tty->driver_data;
997 int status = 0;
998 struct gserial *gser;
999
1000 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1001 port->port_num, duration);
1002
1003 spin_lock_irq(&port->port_lock);
1004 gser = port->port_usb;
1005 if (gser && gser->send_break)
1006 status = gser->send_break(gser, duration);
1007 spin_unlock_irq(&port->port_lock);
1008
1009 return status;
1010 }
1011
1012 static const struct tty_operations gs_tty_ops = {
1013 .open = gs_open,
1014 .close = gs_close,
1015 .write = gs_write,
1016 .put_char = gs_put_char,
1017 .flush_chars = gs_flush_chars,
1018 .write_room = gs_write_room,
1019 .chars_in_buffer = gs_chars_in_buffer,
1020 .unthrottle = gs_unthrottle,
1021 .break_ctl = gs_break_ctl,
1022 };
1023
1024 /*-------------------------------------------------------------------------*/
1025
1026 static struct tty_driver *gs_tty_driver;
1027
1028 static int __init
1029 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1030 {
1031 struct gs_port *port;
1032
1033 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1034 if (port == NULL)
1035 return -ENOMEM;
1036
1037 spin_lock_init(&port->port_lock);
1038 init_waitqueue_head(&port->close_wait);
1039 init_waitqueue_head(&port->drain_wait);
1040
1041 tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
1042
1043 INIT_LIST_HEAD(&port->read_pool);
1044 INIT_LIST_HEAD(&port->read_queue);
1045 INIT_LIST_HEAD(&port->write_pool);
1046
1047 port->port_num = port_num;
1048 port->port_line_coding = *coding;
1049
1050 ports[port_num].port = port;
1051
1052 return 0;
1053 }
1054
1055 /**
1056 * gserial_setup - initialize TTY driver for one or more ports
1057 * @g: gadget to associate with these ports
1058 * @count: how many ports to support
1059 * Context: may sleep
1060 *
1061 * The TTY stack needs to know in advance how many devices it should
1062 * plan to manage. Use this call to set up the ports you will be
1063 * exporting through USB. Later, connect them to functions based
1064 * on what configuration is activated by the USB host; and disconnect
1065 * them as appropriate.
1066 *
1067 * An example would be a two-configuration device in which both
1068 * configurations expose port 0, but through different functions.
1069 * One configuration could even expose port 1 while the other
1070 * one doesn't.
1071 *
1072 * Returns negative errno or zero.
1073 */
1074 int __init gserial_setup(struct usb_gadget *g, unsigned count)
1075 {
1076 unsigned i;
1077 struct usb_cdc_line_coding coding;
1078 int status;
1079
1080 if (count == 0 || count > N_PORTS)
1081 return -EINVAL;
1082
1083 gs_tty_driver = alloc_tty_driver(count);
1084 if (!gs_tty_driver)
1085 return -ENOMEM;
1086
1087 gs_tty_driver->driver_name = "g_serial";
1088 gs_tty_driver->name = PREFIX;
1089 /* uses dynamically assigned dev_t values */
1090
1091 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1092 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1093 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1094 gs_tty_driver->init_termios = tty_std_termios;
1095
1096 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1097 * MS-Windows. Otherwise, most of these flags shouldn't affect
1098 * anything unless we were to actually hook up to a serial line.
1099 */
1100 gs_tty_driver->init_termios.c_cflag =
1101 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1102 gs_tty_driver->init_termios.c_ispeed = 9600;
1103 gs_tty_driver->init_termios.c_ospeed = 9600;
1104
1105 coding.dwDTERate = cpu_to_le32(9600);
1106 coding.bCharFormat = USB_CDC_1_STOP_BITS;
1107 coding.bParityType = USB_CDC_NO_PARITY;
1108 coding.bDataBits = 8;
1109
1110 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1111
1112 /* make devices be openable */
1113 for (i = 0; i < count; i++) {
1114 mutex_init(&ports[i].lock);
1115 status = gs_port_alloc(i, &coding);
1116 if (status) {
1117 count = i;
1118 goto fail;
1119 }
1120 }
1121 n_ports = count;
1122
1123 /* export the driver ... */
1124 status = tty_register_driver(gs_tty_driver);
1125 if (status) {
1126 pr_err("%s: cannot register, err %d\n",
1127 __func__, status);
1128 goto fail;
1129 }
1130
1131 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1132 for (i = 0; i < count; i++) {
1133 struct device *tty_dev;
1134
1135 tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1136 if (IS_ERR(tty_dev))
1137 pr_warning("%s: no classdev for port %d, err %ld\n",
1138 __func__, i, PTR_ERR(tty_dev));
1139 }
1140
1141 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1142 count, (count == 1) ? "" : "s");
1143
1144 return status;
1145 fail:
1146 while (count--)
1147 kfree(ports[count].port);
1148 put_tty_driver(gs_tty_driver);
1149 gs_tty_driver = NULL;
1150 return status;
1151 }
1152
1153 static int gs_closed(struct gs_port *port)
1154 {
1155 int cond;
1156
1157 spin_lock_irq(&port->port_lock);
1158 cond = (port->open_count == 0) && !port->openclose;
1159 spin_unlock_irq(&port->port_lock);
1160 return cond;
1161 }
1162
1163 /**
1164 * gserial_cleanup - remove TTY-over-USB driver and devices
1165 * Context: may sleep
1166 *
1167 * This is called to free all resources allocated by @gserial_setup().
1168 * Accordingly, it may need to wait until some open /dev/ files have
1169 * closed.
1170 *
1171 * The caller must have issued @gserial_disconnect() for any ports
1172 * that had previously been connected, so that there is never any
1173 * I/O pending when it's called.
1174 */
1175 void gserial_cleanup(void)
1176 {
1177 unsigned i;
1178 struct gs_port *port;
1179
1180 if (!gs_tty_driver)
1181 return;
1182
1183 /* start sysfs and /dev/ttyGS* node removal */
1184 for (i = 0; i < n_ports; i++)
1185 tty_unregister_device(gs_tty_driver, i);
1186
1187 for (i = 0; i < n_ports; i++) {
1188 /* prevent new opens */
1189 mutex_lock(&ports[i].lock);
1190 port = ports[i].port;
1191 ports[i].port = NULL;
1192 mutex_unlock(&ports[i].lock);
1193
1194 tasklet_kill(&port->push);
1195
1196 /* wait for old opens to finish */
1197 wait_event(port->close_wait, gs_closed(port));
1198
1199 WARN_ON(port->port_usb != NULL);
1200
1201 kfree(port);
1202 }
1203 n_ports = 0;
1204
1205 tty_unregister_driver(gs_tty_driver);
1206 put_tty_driver(gs_tty_driver);
1207 gs_tty_driver = NULL;
1208
1209 pr_debug("%s: cleaned up ttyGS* support\n", __func__);
1210 }
1211
1212 /**
1213 * gserial_connect - notify TTY I/O glue that USB link is active
1214 * @gser: the function, set up with endpoints and descriptors
1215 * @port_num: which port is active
1216 * Context: any (usually from irq)
1217 *
1218 * This is called to activate endpoints and let the TTY layer know that
1219 * the connection is active ... not unlike "carrier detect". It won't
1220 * necessarily start I/O queues; unless the TTY is held open by any
1221 * task, there would be no point. However, the endpoints will be
1222 * activated so the USB host can perform I/O, subject to basic USB
1223 * hardware flow control.
1224 *
1225 * Caller needs to have set up the endpoints and USB function in @gser
1226 * before calling this, as well as the appropriate (speed-specific)
1227 * endpoint descriptors, and also have set up the TTY driver by calling
1228 * @gserial_setup().
1229 *
1230 * Returns negative errno or zero.
1231 * On success, ep->driver_data will be overwritten.
1232 */
1233 int gserial_connect(struct gserial *gser, u8 port_num)
1234 {
1235 struct gs_port *port;
1236 unsigned long flags;
1237 int status;
1238
1239 if (!gs_tty_driver || port_num >= n_ports)
1240 return -ENXIO;
1241
1242 /* we "know" gserial_cleanup() hasn't been called */
1243 port = ports[port_num].port;
1244
1245 /* activate the endpoints */
1246 status = usb_ep_enable(gser->in);
1247 if (status < 0)
1248 return status;
1249 gser->in->driver_data = port;
1250
1251 status = usb_ep_enable(gser->out);
1252 if (status < 0)
1253 goto fail_out;
1254 gser->out->driver_data = port;
1255
1256 /* then tell the tty glue that I/O can work */
1257 spin_lock_irqsave(&port->port_lock, flags);
1258 gser->ioport = port;
1259 port->port_usb = gser;
1260
1261 /* REVISIT unclear how best to handle this state...
1262 * we don't really couple it with the Linux TTY.
1263 */
1264 gser->port_line_coding = port->port_line_coding;
1265
1266 /* REVISIT if waiting on "carrier detect", signal. */
1267
1268 /* if it's already open, start I/O ... and notify the serial
1269 * protocol about open/close status (connect/disconnect).
1270 */
1271 if (port->open_count) {
1272 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1273 gs_start_io(port);
1274 if (gser->connect)
1275 gser->connect(gser);
1276 } else {
1277 if (gser->disconnect)
1278 gser->disconnect(gser);
1279 }
1280
1281 spin_unlock_irqrestore(&port->port_lock, flags);
1282
1283 return status;
1284
1285 fail_out:
1286 usb_ep_disable(gser->in);
1287 gser->in->driver_data = NULL;
1288 return status;
1289 }
1290
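/*
 * Minimal sketch of the usual caller, a function driver's set_alt()
 * and disable() methods (hedged; "func" and its fields are assumptions,
 * not part of this file):
 *
 *	set_alt():	if (func->port.in->driver_data)
 *				gserial_disconnect(&func->port);
 *			... select speed-specific descriptors ...
 *			return gserial_connect(&func->port, func->port_num);
 *
 *	disable():	gserial_disconnect(&func->port);
 */
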
1291 /**
1292 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1293 * @gser: the function, on which gserial_connect() was called
1294 * Context: any (usually from irq)
1295 *
1296 * This is called to deactivate endpoints and let the TTY layer know
1297 * that the connection went inactive ... not unlike "hangup".
1298 *
1299 * On return, the state is as if gserial_connect() had never been called;
1300 * there is no active USB I/O on these endpoints.
1301 */
1302 void gserial_disconnect(struct gserial *gser)
1303 {
1304 struct gs_port *port = gser->ioport;
1305 unsigned long flags;
1306
1307 if (!port)
1308 return;
1309
1310 /* tell the TTY glue not to do I/O here any more */
1311 spin_lock_irqsave(&port->port_lock, flags);
1312
1313 /* REVISIT as above: how best to track this? */
1314 port->port_line_coding = gser->port_line_coding;
1315
1316 port->port_usb = NULL;
1317 gser->ioport = NULL;
1318 if (port->open_count > 0 || port->openclose) {
1319 wake_up_interruptible(&port->drain_wait);
1320 if (port->port_tty)
1321 tty_hangup(port->port_tty);
1322 }
1323 spin_unlock_irqrestore(&port->port_lock, flags);
1324
1325 /* disable endpoints, aborting down any active I/O */
1326 usb_ep_disable(gser->out);
1327 gser->out->driver_data = NULL;
1328
1329 usb_ep_disable(gser->in);
1330 gser->in->driver_data = NULL;
1331
1332 /* finally, free any unused/unusable I/O buffers */
1333 spin_lock_irqsave(&port->port_lock, flags);
1334 if (port->open_count == 0 && !port->openclose)
1335 gs_buf_free(&port->port_write_buf);
1336 gs_free_requests(gser->out, &port->read_pool, NULL);
1337 gs_free_requests(gser->out, &port->read_queue, NULL);
1338 gs_free_requests(gser->in, &port->write_pool, NULL);
1339
1340 port->read_allocated = port->read_started =
1341 port->write_allocated = port->write_started = 0;
1342
1343 spin_unlock_irqrestore(&port->port_lock, flags);
1344 }
1345