1 /*
2  * MUSB OTG driver peripheral support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * version 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21  * 02110-1301 USA
22  *
23  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
26  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/timer.h>
39 #include <linux/module.h>
40 #include <linux/smp.h>
41 #include <linux/spinlock.h>
42 #include <linux/delay.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 
46 #include "musb_core.h"
47 
48 
49 /* MUSB PERIPHERAL status 3-mar-2006:
50  *
51  * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
52  *   Minor glitches:
53  *
54  *     + remote wakeup to Linux hosts works, but saw USBCV failures;
55  *       in one test run (operator error?)
56  *     + endpoint halt tests -- in both usbtest and usbcv -- seem
57  *       to break when dma is enabled ... is something wrongly
58  *       clearing SENDSTALL?
59  *
60  * - Mass storage behaved ok when last tested.  Network traffic patterns
61  *   (with lots of short transfers etc) need retesting; they turn up the
62  *   worst cases of the DMA, since short packets are typical but are not
63  *   required.
64  *
65  * - TX/IN
66  *     + both pio and dma behave well with network and g_zero tests
67  *     + no cppi throughput issues other than no-hw-queueing
68  *     + failed with FLAT_REG (DaVinci)
69  *     + seems to behave with double buffering, PIO -and- CPPI
70  *     + with gadgetfs + AIO, requests got lost?
71  *
72  * - RX/OUT
73  *     + both pio and dma behave well with network and g_zero tests
74  *     + dma is slow in typical case (short_not_ok is clear)
75  *     + double buffering ok with PIO
76  *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77  *     + request lossage observed with gadgetfs
78  *
79  * - ISO not tested ... might work, but only weakly isochronous
80  *
81  * - Gadget driver disabling of softconnect during bind() is ignored; so
82  *   drivers can't hold off host requests until userspace is ready.
83  *   (Workaround:  they can turn it off later.)
84  *
85  * - PORTABILITY (assumes PIO works):
86  *     + DaVinci, basically works with cppi dma
87  *     + OMAP 2430, ditto with mentor dma
88  *     + TUSB 6010, platform-specific dma in the works
89  */
90 
91 /* ----------------------------------------------------------------------- */
92 
93 #define is_buffer_mapped(req) (is_dma_capable() && \
94 					(req->map_state != UN_MAPPED))
95 
96 /* Maps the buffer to dma  */
97 
98 static inline void map_dma_buffer(struct musb_request *request,
99 			struct musb *musb, struct musb_ep *musb_ep)
100 {
101 	int compatible = true;
102 	struct dma_controller *dma = musb->dma_controller;
103 
104 	request->map_state = UN_MAPPED;
105 
106 	if (!is_dma_capable() || !musb_ep->dma)
107 		return;
108 
109 	/* Check if DMA engine can handle this request.
110 	 * DMA code must reject the USB request explicitly.
111 	 * Default behaviour is to map the request.
112 	 */
113 	if (dma->is_compatible)
114 		compatible = dma->is_compatible(musb_ep->dma,
115 				musb_ep->packet_sz, request->request.buf,
116 				request->request.length);
117 	if (!compatible)
118 		return;
119 
120 	if (request->request.dma == DMA_ADDR_INVALID) {
121 		request->request.dma = dma_map_single(
122 				musb->controller,
123 				request->request.buf,
124 				request->request.length,
125 				request->tx
126 					? DMA_TO_DEVICE
127 					: DMA_FROM_DEVICE);
128 		request->map_state = MUSB_MAPPED;
129 	} else {
130 		dma_sync_single_for_device(musb->controller,
131 			request->request.dma,
132 			request->request.length,
133 			request->tx
134 				? DMA_TO_DEVICE
135 				: DMA_FROM_DEVICE);
136 		request->map_state = PRE_MAPPED;
137 	}
138 }
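
/*
 * Illustrative sketch, not part of this driver: a gadget function driver
 * that manages its own streaming DMA mapping can pre-map the buffer and
 * set req->dma before queueing, so map_dma_buffer() above takes the
 * PRE_MAPPED branch and only syncs the buffer for the device.  The names
 * ctrl_dev (the controller's struct device) and my_req are hypothetical.
 *
 *	my_req->dma = dma_map_single(ctrl_dev, my_req->buf,
 *				     my_req->length, DMA_TO_DEVICE);
 *	if (dma_mapping_error(ctrl_dev, my_req->dma))
 *		return -ENOMEM;
 *	status = usb_ep_queue(ep, my_req, GFP_ATOMIC);
 *
 * Leaving req->dma at DMA_ADDR_INVALID instead lets this code do the
 * mapping itself (MUSB_MAPPED) and undo it later in unmap_dma_buffer().
 */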
139 
140 /* Unmap the buffer from dma and map it back to the cpu */
141 static inline void unmap_dma_buffer(struct musb_request *request,
142 				struct musb *musb)
143 {
144 	if (!is_buffer_mapped(request))
145 		return;
146 
147 	if (request->request.dma == DMA_ADDR_INVALID) {
148 		dev_vdbg(musb->controller,
149 				"not unmapping a never mapped buffer\n");
150 		return;
151 	}
152 	if (request->map_state == MUSB_MAPPED) {
153 		dma_unmap_single(musb->controller,
154 			request->request.dma,
155 			request->request.length,
156 			request->tx
157 				? DMA_TO_DEVICE
158 				: DMA_FROM_DEVICE);
159 		request->request.dma = DMA_ADDR_INVALID;
160 	} else { /* PRE_MAPPED */
161 		dma_sync_single_for_cpu(musb->controller,
162 			request->request.dma,
163 			request->request.length,
164 			request->tx
165 				? DMA_TO_DEVICE
166 				: DMA_FROM_DEVICE);
167 	}
168 	request->map_state = UN_MAPPED;
169 }
170 
171 /*
172  * Immediately complete a request.
173  *
174  * @param request the request to complete
175  * @param status the status to complete the request with
176  * Context: controller locked, IRQs blocked.
177  */
178 void musb_g_giveback(
179 	struct musb_ep		*ep,
180 	struct usb_request	*request,
181 	int			status)
182 __releases(ep->musb->lock)
183 __acquires(ep->musb->lock)
184 {
185 	struct musb_request	*req;
186 	struct musb		*musb;
187 	int			busy = ep->busy;
188 
189 	req = to_musb_request(request);
190 
191 	list_del(&req->list);
192 	if (req->request.status == -EINPROGRESS)
193 		req->request.status = status;
194 	musb = req->musb;
195 
196 	ep->busy = 1;
197 	spin_unlock(&musb->lock);
198 	unmap_dma_buffer(req, musb);
199 	if (request->status == 0)
200 		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
201 				ep->end_point.name, request,
202 				req->request.actual, req->request.length);
203 	else
204 		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
205 				ep->end_point.name, request,
206 				req->request.actual, req->request.length,
207 				request->status);
208 	req->request.complete(&req->ep->end_point, &req->request);
209 	spin_lock(&musb->lock);
210 	ep->busy = busy;
211 }
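
/*
 * Illustrative sketch, gadget-driver side: the ->complete() callback
 * invoked above runs with the controller lock dropped, and may queue the
 * next request right away; that is why callers of musb_g_giveback()
 * re-select the endpoint INDEX afterwards.  A minimal completion handler
 * might look like this (my_complete and more_data_pending are
 * hypothetical):
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status == 0 && more_data_pending())
 *			usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 */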
212 
213 /* ----------------------------------------------------------------------- */
214 
215 /*
216  * Abort requests queued to an endpoint, using the given status. Synchronous.
217  * The caller has locked the controller, blocked IRQs, and selected this ep.
218  */
219 static void nuke(struct musb_ep *ep, const int status)
220 {
221 	struct musb		*musb = ep->musb;
222 	struct musb_request	*req = NULL;
223 	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
224 
225 	ep->busy = 1;
226 
227 	if (is_dma_capable() && ep->dma) {
228 		struct dma_controller	*c = ep->musb->dma_controller;
229 		int value;
230 
231 		if (ep->is_in) {
232 			/*
233 			 * The programming guide says that we must not clear
234 			 * the DMAMODE bit before DMAENAB, so we only
235 			 * clear it in the second write...
236 			 */
237 			musb_writew(epio, MUSB_TXCSR,
238 				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
239 			musb_writew(epio, MUSB_TXCSR,
240 					0 | MUSB_TXCSR_FLUSHFIFO);
241 		} else {
242 			musb_writew(epio, MUSB_RXCSR,
243 					0 | MUSB_RXCSR_FLUSHFIFO);
244 			musb_writew(epio, MUSB_RXCSR,
245 					0 | MUSB_RXCSR_FLUSHFIFO);
246 		}
247 
248 		value = c->channel_abort(ep->dma);
249 		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
250 				ep->name, value);
251 		c->channel_release(ep->dma);
252 		ep->dma = NULL;
253 	}
254 
255 	while (!list_empty(&ep->req_list)) {
256 		req = list_first_entry(&ep->req_list, struct musb_request, list);
257 		musb_g_giveback(ep, &req->request, status);
258 	}
259 }
260 
261 /* ----------------------------------------------------------------------- */
262 
263 /* Data transfers - pure PIO, pure DMA, or mixed mode */
264 
265 /*
266  * This assumes the separate CPPI engine is responding to DMA requests
267  * from the usb core ... sequenced a bit differently from mentor dma.
268  */
269 
270 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
271 {
272 	if (can_bulk_split(musb, ep->type))
273 		return ep->hw_ep->max_packet_sz_tx;
274 	else
275 		return ep->packet_sz;
276 }
277 
278 
279 #ifdef CONFIG_USB_INVENTRA_DMA
280 
281 /* Peripheral tx (IN) using Mentor DMA works as follows:
282 	Only mode 0 is used for transfers <= wPktSize,
283 	mode 1 is used for larger transfers,
284 
285 	One of the following happens:
286 	- Host sends IN token which causes an endpoint interrupt
287 		-> TxAvail
288 			-> if DMA is currently busy, exit.
289 			-> if queue is non-empty, txstate().
290 
291 	- Request is queued by the gadget driver.
292 		-> if queue was previously empty, txstate()
293 
294 	txstate()
295 		-> start
296 		  /\	-> setup DMA
297 		  |     (data is transferred to the FIFO, then sent out when
298 		  |	IN token(s) are recd from Host.)
299 		  |		-> DMA interrupt on completion
300 		  |		   calls TxAvail.
301 		  |		      -> stop DMA, ~DMAENAB,
302 		  |		      -> set TxPktRdy for last short pkt or zlp
303 		  |		      -> Complete Request
304 		  |		      -> Continue next request (call txstate)
305 		  |___________________________________|
306 
307  * Non-Mentor DMA engines can of course work differently, such as by
308  * upleveling from irq-per-packet to irq-per-buffer.
309  */
310 
311 #endif
312 
313 /*
314  * An endpoint is transmitting data. This can be called either from
315  * the IRQ routine or from ep.queue() to kickstart a request on an
316  * endpoint.
317  *
318  * Context: controller locked, IRQs blocked, endpoint selected
319  */
320 static void txstate(struct musb *musb, struct musb_request *req)
321 {
322 	u8			epnum = req->epnum;
323 	struct musb_ep		*musb_ep;
324 	void __iomem		*epio = musb->endpoints[epnum].regs;
325 	struct usb_request	*request;
326 	u16			fifo_count = 0, csr;
327 	int			use_dma = 0;
328 
329 	musb_ep = req->ep;
330 
331 	/* we shouldn't get here while DMA is active ... but we do ... */
332 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
333 		dev_dbg(musb->controller, "dma pending...\n");
334 		return;
335 	}
336 
337 	/* read TXCSR before */
338 	csr = musb_readw(epio, MUSB_TXCSR);
339 
340 	request = &req->request;
341 	fifo_count = min(max_ep_writesize(musb, musb_ep),
342 			(int)(request->length - request->actual));
343 
344 	if (csr & MUSB_TXCSR_TXPKTRDY) {
345 		dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
346 				musb_ep->end_point.name, csr);
347 		return;
348 	}
349 
350 	if (csr & MUSB_TXCSR_P_SENDSTALL) {
351 		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
352 				musb_ep->end_point.name, csr);
353 		return;
354 	}
355 
356 	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
357 			epnum, musb_ep->packet_sz, fifo_count,
358 			csr);
359 
360 #ifndef	CONFIG_MUSB_PIO_ONLY
361 	if (is_buffer_mapped(req)) {
362 		struct dma_controller	*c = musb->dma_controller;
363 		size_t request_size;
364 
365 		/* setup DMA, then program endpoint CSR */
366 		request_size = min_t(size_t, request->length - request->actual,
367 					musb_ep->dma->max_len);
368 
369 		use_dma = (request->dma != DMA_ADDR_INVALID);
370 
371 		/* MUSB_TXCSR_P_ISO is still set correctly */
372 
373 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
374 		{
375 			if (request_size < musb_ep->packet_sz)
376 				musb_ep->dma->desired_mode = 0;
377 			else
378 				musb_ep->dma->desired_mode = 1;
379 
380 			use_dma = use_dma && c->channel_program(
381 					musb_ep->dma, musb_ep->packet_sz,
382 					musb_ep->dma->desired_mode,
383 					request->dma + request->actual, request_size);
384 			if (use_dma) {
385 				if (musb_ep->dma->desired_mode == 0) {
386 					/*
387 					 * We must not clear the DMAMODE bit
388 					 * before the DMAENAB bit -- and the
389 					 * latter doesn't always get cleared
390 					 * before we get here...
391 					 */
392 					csr &= ~(MUSB_TXCSR_AUTOSET
393 						| MUSB_TXCSR_DMAENAB);
394 					musb_writew(epio, MUSB_TXCSR, csr
395 						| MUSB_TXCSR_P_WZC_BITS);
396 					csr &= ~MUSB_TXCSR_DMAMODE;
397 					csr |= (MUSB_TXCSR_DMAENAB |
398 							MUSB_TXCSR_MODE);
399 					/* against programming guide */
400 				} else {
401 					csr |= (MUSB_TXCSR_DMAENAB
402 							| MUSB_TXCSR_DMAMODE
403 							| MUSB_TXCSR_MODE);
404 					/*
405 					 * Enable Autoset according to table
406 					 * below
407 					 * bulk_split hb_mult	Autoset_Enable
408 					 *	0	0	Yes(Normal)
409 					 *	0	>0	No(High BW ISO)
410 					 *	1	0	Yes(HS bulk)
411 					 *	1	>0	Yes(FS bulk)
412 					 */
413 					if (!musb_ep->hb_mult ||
414 						(musb_ep->hb_mult &&
415 						 can_bulk_split(musb,
416 						    musb_ep->type)))
417 						csr |= MUSB_TXCSR_AUTOSET;
418 				}
419 				csr &= ~MUSB_TXCSR_P_UNDERRUN;
420 
421 				musb_writew(epio, MUSB_TXCSR, csr);
422 			}
423 		}
424 
425 #elif defined(CONFIG_USB_TI_CPPI_DMA)
426 		/* program endpoint CSR first, then setup DMA */
427 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
428 		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
429 		       MUSB_TXCSR_MODE;
430 		musb_writew(epio, MUSB_TXCSR,
431 			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
432 				| csr);
433 
434 		/* ensure writebuffer is empty */
435 		csr = musb_readw(epio, MUSB_TXCSR);
436 
437 		/* NOTE host side sets DMAENAB later than this; both are
438 		 * OK since the transfer dma glue (between CPPI and Mentor
439 		 * fifos) just tells CPPI it could start.  Data only moves
440 		 * to the USB TX fifo when both fifos are ready.
441 		 */
442 
443 		/* "mode" is irrelevant here; handle terminating ZLPs like
444 		 * PIO does, since the hardware RNDIS mode seems unreliable
445 		 * except for the last-packet-is-already-short case.
446 		 */
447 		use_dma = use_dma && c->channel_program(
448 				musb_ep->dma, musb_ep->packet_sz,
449 				0,
450 				request->dma + request->actual,
451 				request_size);
452 		if (!use_dma) {
453 			c->channel_release(musb_ep->dma);
454 			musb_ep->dma = NULL;
455 			csr &= ~MUSB_TXCSR_DMAENAB;
456 			musb_writew(epio, MUSB_TXCSR, csr);
457 			/* invariant: request->buf is non-null */
458 		}
459 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
460 		use_dma = use_dma && c->channel_program(
461 				musb_ep->dma, musb_ep->packet_sz,
462 				request->zero,
463 				request->dma + request->actual,
464 				request_size);
465 #endif
466 	}
467 #endif
468 
469 	if (!use_dma) {
470 		/*
471 		 * Unmap the dma buffer back to cpu if dma channel
472 		 * programming fails
473 		 */
474 		unmap_dma_buffer(req, musb);
475 
476 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
477 				(u8 *) (request->buf + request->actual));
478 		request->actual += fifo_count;
479 		csr |= MUSB_TXCSR_TXPKTRDY;
480 		csr &= ~MUSB_TXCSR_P_UNDERRUN;
481 		musb_writew(epio, MUSB_TXCSR, csr);
482 	}
483 
484 	/* host may already have the data when this message shows... */
485 	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
486 			musb_ep->end_point.name, use_dma ? "dma" : "pio",
487 			request->actual, request->length,
488 			musb_readw(epio, MUSB_TXCSR),
489 			fifo_count,
490 			musb_readw(epio, MUSB_TXMAXP));
491 }
492 
493 /*
494  * FIFO state update (e.g. data ready).
495  * Called from IRQ,  with controller locked.
496  */
497 void musb_g_tx(struct musb *musb, u8 epnum)
498 {
499 	u16			csr;
500 	struct musb_request	*req;
501 	struct usb_request	*request;
502 	u8 __iomem		*mbase = musb->mregs;
503 	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
504 	void __iomem		*epio = musb->endpoints[epnum].regs;
505 	struct dma_channel	*dma;
506 
507 	musb_ep_select(mbase, epnum);
508 	req = next_request(musb_ep);
509 	request = &req->request;
510 
511 	csr = musb_readw(epio, MUSB_TXCSR);
512 	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
513 
514 	dma = is_dma_capable() ? musb_ep->dma : NULL;
515 
516 	/*
517 	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
518 	 * probably rates reporting as a host error.
519 	 */
520 	if (csr & MUSB_TXCSR_P_SENTSTALL) {
521 		csr |=	MUSB_TXCSR_P_WZC_BITS;
522 		csr &= ~MUSB_TXCSR_P_SENTSTALL;
523 		musb_writew(epio, MUSB_TXCSR, csr);
524 		return;
525 	}
526 
527 	if (csr & MUSB_TXCSR_P_UNDERRUN) {
528 		/* We NAKed, no big deal... little reason to care. */
529 		csr |=	 MUSB_TXCSR_P_WZC_BITS;
530 		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
531 		musb_writew(epio, MUSB_TXCSR, csr);
532 		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
533 				epnum, request);
534 	}
535 
536 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
537 		/*
538 		 * SHOULD NOT HAPPEN... has with CPPI though, after
539 		 * changing SENDSTALL (and other cases); harmless?
540 		 */
541 		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
542 		return;
543 	}
544 
545 	if (request) {
546 		u8	is_dma = 0;
547 
548 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
549 			is_dma = 1;
550 			csr |= MUSB_TXCSR_P_WZC_BITS;
551 			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
552 				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
553 			musb_writew(epio, MUSB_TXCSR, csr);
554 			/* Ensure writebuffer is empty. */
555 			csr = musb_readw(epio, MUSB_TXCSR);
556 			request->actual += musb_ep->dma->actual_len;
557 			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
558 				epnum, csr, musb_ep->dma->actual_len, request);
559 		}
560 
561 		/*
562 		 * First, maybe a terminating short packet. Some DMA
563 		 * engines might handle this by themselves.
564 		 */
565 		if ((request->zero && request->length
566 			&& (request->length % musb_ep->packet_sz == 0)
567 			&& (request->actual == request->length))
568 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
569 			|| (is_dma && (!dma->desired_mode ||
570 				(request->actual &
571 					(musb_ep->packet_sz - 1))))
572 #endif
573 		) {
574 			/*
575 			 * On DMA completion, FIFO may not be
576 			 * available yet...
577 			 */
578 			if (csr & MUSB_TXCSR_TXPKTRDY)
579 				return;
580 
581 			dev_dbg(musb->controller, "sending zero pkt\n");
582 			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
583 					| MUSB_TXCSR_TXPKTRDY);
584 			request->zero = 0;
585 		}
586 
587 		if (request->actual == request->length) {
588 			musb_g_giveback(musb_ep, request, 0);
589 			/*
590 			 * In the giveback function the MUSB lock is
591 			 * released and re-acquired after some time. During
592 			 * this time period the INDEX register could get
593 			 * changed by the gadget_queue function especially
594 			 * on SMP systems. Reselect the INDEX to be sure
595 			 * we are reading/modifying the right registers
596 			 */
597 			musb_ep_select(mbase, epnum);
598 			req = musb_ep->desc ? next_request(musb_ep) : NULL;
599 			if (!req) {
600 				dev_dbg(musb->controller, "%s idle now\n",
601 					musb_ep->end_point.name);
602 				return;
603 			}
604 		}
605 
606 		txstate(musb, req);
607 	}
608 }
609 
610 /* ------------------------------------------------------------ */
611 
612 #ifdef CONFIG_USB_INVENTRA_DMA
613 
614 /* Peripheral rx (OUT) using Mentor DMA works as follows:
615 	- Only mode 0 is used.
616 
617 	- Request is queued by the gadget class driver.
618 		-> if queue was previously empty, rxstate()
619 
620 	- Host sends OUT token which causes an endpoint interrupt
621 	  /\      -> RxReady
622 	  |	      -> if request queued, call rxstate
623 	  |		/\	-> setup DMA
624 	  |		|	     -> DMA interrupt on completion
625 	  |		|		-> RxReady
626 	  |		|		      -> stop DMA
627 	  |		|		      -> ack the read
628 	  |		|		      -> if data recd = max expected
629 	  |		|				by the request, or host
630 	  |		|				sent a short packet,
631 	  |		|				complete the request,
632 	  |		|				and start the next one.
633 	  |		|_____________________________________|
634 	  |					 else just wait for the host
635 	  |					    to send the next OUT token.
636 	  |__________________________________________________|
637 
638  * Non-Mentor DMA engines can of course work differently.
639  */
640 
641 #endif
642 
643 /*
644  * Context: controller locked, IRQs blocked, endpoint selected
645  */
646 static void rxstate(struct musb *musb, struct musb_request *req)
647 {
648 	const u8		epnum = req->epnum;
649 	struct usb_request	*request = &req->request;
650 	struct musb_ep		*musb_ep;
651 	void __iomem		*epio = musb->endpoints[epnum].regs;
652 	unsigned		fifo_count = 0;
653 	u16			len;
654 	u16			csr = musb_readw(epio, MUSB_RXCSR);
655 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
656 	u8			use_mode_1;
657 
658 	if (hw_ep->is_shared_fifo)
659 		musb_ep = &hw_ep->ep_in;
660 	else
661 		musb_ep = &hw_ep->ep_out;
662 
663 	len = musb_ep->packet_sz;
664 
665 	/* We shouldn't get here while DMA is active, but we do... */
666 	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
667 		dev_dbg(musb->controller, "DMA pending...\n");
668 		return;
669 	}
670 
671 	if (csr & MUSB_RXCSR_P_SENDSTALL) {
672 		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
673 		    musb_ep->end_point.name, csr);
674 		return;
675 	}
676 
677 	if (is_cppi_enabled() && is_buffer_mapped(req)) {
678 		struct dma_controller	*c = musb->dma_controller;
679 		struct dma_channel	*channel = musb_ep->dma;
680 
681 		/* NOTE:  CPPI won't actually stop advancing the DMA
682 		 * queue after short packet transfers, so this is almost
683 		 * always going to run as IRQ-per-packet DMA so that
684 		 * faults will be handled correctly.
685 		 */
686 		if (c->channel_program(channel,
687 				musb_ep->packet_sz,
688 				!request->short_not_ok,
689 				request->dma + request->actual,
690 				request->length - request->actual)) {
691 
692 			/* make sure that if an rxpkt arrived after the irq,
693 			 * the cppi engine will be ready to take it as soon
694 			 * as DMA is enabled
695 			 */
696 			csr &= ~(MUSB_RXCSR_AUTOCLEAR
697 					| MUSB_RXCSR_DMAMODE);
698 			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
699 			musb_writew(epio, MUSB_RXCSR, csr);
700 			return;
701 		}
702 	}
703 
704 	if (csr & MUSB_RXCSR_RXPKTRDY) {
705 		len = musb_readw(epio, MUSB_RXCOUNT);
706 
707 		/*
708 		 * Enable Mode 1 on RX transfers only when short_not_ok flag
709 		 * is set. Currently short_not_ok flag is set only from
710 		 * file_storage and f_mass_storage drivers
711 		 */
712 
713 		if (request->short_not_ok && len == musb_ep->packet_sz)
714 			use_mode_1 = 1;
715 		else
716 			use_mode_1 = 0;
717 
718 		if (request->actual < request->length) {
719 #ifdef CONFIG_USB_INVENTRA_DMA
720 			if (is_buffer_mapped(req)) {
721 				struct dma_controller	*c;
722 				struct dma_channel	*channel;
723 				int			use_dma = 0;
724 
725 				c = musb->dma_controller;
726 				channel = musb_ep->dma;
727 
728 	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
729 	 * mode 0 only. So we do not get endpoint interrupts due to DMA
730 	 * completion. We only get interrupts from DMA controller.
731 	 *
732 	 * We could operate in DMA mode 1 if we knew the size of the transfer
733 	 * in advance. For mass storage class, request->length = what the host
734 	 * sends, so that'd work.  But for pretty much everything else,
735 	 * request->length is routinely more than what the host sends. For
736 	 * most of these gadgets, end of transfer is signified either by a short
737 	 * packet, or by filling the last byte of the buffer.  (Sending extra
738 	 * data in that last packet should trigger an overflow fault.)  But in mode 1,
739 	 * we don't get DMA completion interrupt for short packets.
740 	 *
741 	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
742 	 * to get endpoint interrupt on every DMA req, but that didn't seem
743 	 * to work reliably.
744 	 *
745 	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
746 	 * then becomes usable as a runtime "use mode 1" hint...
747 	 */
748 
749 				/* Experimental: Mode1 works with mass storage use cases */
750 				if (use_mode_1) {
751 					csr |= MUSB_RXCSR_AUTOCLEAR;
752 					musb_writew(epio, MUSB_RXCSR, csr);
753 					csr |= MUSB_RXCSR_DMAENAB;
754 					musb_writew(epio, MUSB_RXCSR, csr);
755 
756 					/*
757 					 * this special sequence (enabling and then
758 					 * disabling MUSB_RXCSR_DMAMODE) is required
759 					 * to get DMAReq to activate
760 					 */
761 					musb_writew(epio, MUSB_RXCSR,
762 						csr | MUSB_RXCSR_DMAMODE);
763 					musb_writew(epio, MUSB_RXCSR, csr);
764 
765 				} else {
766 					if (!musb_ep->hb_mult &&
767 						musb_ep->hw_ep->rx_double_buffered)
768 						csr |= MUSB_RXCSR_AUTOCLEAR;
769 					csr |= MUSB_RXCSR_DMAENAB;
770 					musb_writew(epio, MUSB_RXCSR, csr);
771 				}
772 
773 				if (request->actual < request->length) {
774 					int transfer_size = 0;
775 					if (use_mode_1) {
776 						transfer_size = min(request->length - request->actual,
777 								channel->max_len);
778 						musb_ep->dma->desired_mode = 1;
779 					} else {
780 						transfer_size = min(request->length - request->actual,
781 								(unsigned)len);
782 						musb_ep->dma->desired_mode = 0;
783 					}
784 
785 					use_dma = c->channel_program(
786 							channel,
787 							musb_ep->packet_sz,
788 							channel->desired_mode,
789 							request->dma
790 							+ request->actual,
791 							transfer_size);
792 				}
793 
794 				if (use_dma)
795 					return;
796 			}
797 #elif defined(CONFIG_USB_UX500_DMA)
798 			if ((is_buffer_mapped(req)) &&
799 				(request->actual < request->length)) {
800 
801 				struct dma_controller *c;
802 				struct dma_channel *channel;
803 				int transfer_size = 0;
804 
805 				c = musb->dma_controller;
806 				channel = musb_ep->dma;
807 
808 				/* In case first packet is short */
809 				if (len < musb_ep->packet_sz)
810 					transfer_size = len;
811 				else if (request->short_not_ok)
812 					transfer_size =	min(request->length -
813 							request->actual,
814 							channel->max_len);
815 				else
816 					transfer_size = min(request->length -
817 							request->actual,
818 							(unsigned)len);
819 
820 				csr &= ~MUSB_RXCSR_DMAMODE;
821 				csr |= (MUSB_RXCSR_DMAENAB |
822 					MUSB_RXCSR_AUTOCLEAR);
823 
824 				musb_writew(epio, MUSB_RXCSR, csr);
825 
826 				if (transfer_size <= musb_ep->packet_sz) {
827 					musb_ep->dma->desired_mode = 0;
828 				} else {
829 					musb_ep->dma->desired_mode = 1;
830 					/* Mode must be set after DMAENAB */
831 					csr |= MUSB_RXCSR_DMAMODE;
832 					musb_writew(epio, MUSB_RXCSR, csr);
833 				}
834 
835 				if (c->channel_program(channel,
836 							musb_ep->packet_sz,
837 							channel->desired_mode,
838 							request->dma
839 							+ request->actual,
840 							transfer_size))
841 
842 					return;
843 			}
844 #endif	/* Mentor's DMA */
845 
846 			fifo_count = request->length - request->actual;
847 			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
848 					musb_ep->end_point.name,
849 					len, fifo_count,
850 					musb_ep->packet_sz);
851 
852 			fifo_count = min_t(unsigned, len, fifo_count);
853 
854 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
855 			if (tusb_dma_omap() && is_buffer_mapped(req)) {
856 				struct dma_controller *c = musb->dma_controller;
857 				struct dma_channel *channel = musb_ep->dma;
858 				u32 dma_addr = request->dma + request->actual;
859 				int ret;
860 
861 				ret = c->channel_program(channel,
862 						musb_ep->packet_sz,
863 						channel->desired_mode,
864 						dma_addr,
865 						fifo_count);
866 				if (ret)
867 					return;
868 			}
869 #endif
870 			/*
871 			 * Unmap the dma buffer back to cpu if dma channel
872 			 * programming fails. This buffer is mapped if the
873 			 * channel allocation is successful
874 			 */
875 			 if (is_buffer_mapped(req)) {
876 				unmap_dma_buffer(req, musb);
877 
878 				/*
879 				 * Clear DMAENAB and AUTOCLEAR for the
880 				 * PIO mode transfer
881 				 */
882 				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
883 				musb_writew(epio, MUSB_RXCSR, csr);
884 			}
885 
886 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
887 					(request->buf + request->actual));
888 			request->actual += fifo_count;
889 
890 			/* REVISIT if we left anything in the fifo, flush
891 			 * it and report -EOVERFLOW
892 			 */
893 
894 			/* ack the read! */
895 			csr |= MUSB_RXCSR_P_WZC_BITS;
896 			csr &= ~MUSB_RXCSR_RXPKTRDY;
897 			musb_writew(epio, MUSB_RXCSR, csr);
898 		}
899 	}
900 
901 	/* reach the end or short packet detected */
902 	if (request->actual == request->length || len < musb_ep->packet_sz)
903 		musb_g_giveback(musb_ep, request, 0);
904 }
905 
906 /*
907  * Data ready for a request; called from IRQ
908  */
909 void musb_g_rx(struct musb *musb, u8 epnum)
910 {
911 	u16			csr;
912 	struct musb_request	*req;
913 	struct usb_request	*request;
914 	void __iomem		*mbase = musb->mregs;
915 	struct musb_ep		*musb_ep;
916 	void __iomem		*epio = musb->endpoints[epnum].regs;
917 	struct dma_channel	*dma;
918 	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
919 
920 	if (hw_ep->is_shared_fifo)
921 		musb_ep = &hw_ep->ep_in;
922 	else
923 		musb_ep = &hw_ep->ep_out;
924 
925 	musb_ep_select(mbase, epnum);
926 
927 	req = next_request(musb_ep);
928 	if (!req)
929 		return;
930 
931 	request = &req->request;
932 
933 	csr = musb_readw(epio, MUSB_RXCSR);
934 	dma = is_dma_capable() ? musb_ep->dma : NULL;
935 
936 	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
937 			csr, dma ? " (dma)" : "", request);
938 
939 	if (csr & MUSB_RXCSR_P_SENTSTALL) {
940 		csr |= MUSB_RXCSR_P_WZC_BITS;
941 		csr &= ~MUSB_RXCSR_P_SENTSTALL;
942 		musb_writew(epio, MUSB_RXCSR, csr);
943 		return;
944 	}
945 
946 	if (csr & MUSB_RXCSR_P_OVERRUN) {
947 		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
948 		csr &= ~MUSB_RXCSR_P_OVERRUN;
949 		musb_writew(epio, MUSB_RXCSR, csr);
950 
951 		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
952 		if (request->status == -EINPROGRESS)
953 			request->status = -EOVERFLOW;
954 	}
955 	if (csr & MUSB_RXCSR_INCOMPRX) {
956 		/* REVISIT not necessarily an error */
957 		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
958 	}
959 
960 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
961 		/* "should not happen"; likely RXPKTRDY pending for DMA */
962 		dev_dbg(musb->controller, "%s busy, csr %04x\n",
963 			musb_ep->end_point.name, csr);
964 		return;
965 	}
966 
967 	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
968 		csr &= ~(MUSB_RXCSR_AUTOCLEAR
969 				| MUSB_RXCSR_DMAENAB
970 				| MUSB_RXCSR_DMAMODE);
971 		musb_writew(epio, MUSB_RXCSR,
972 			MUSB_RXCSR_P_WZC_BITS | csr);
973 
974 		request->actual += musb_ep->dma->actual_len;
975 
976 		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
977 			epnum, csr,
978 			musb_readw(epio, MUSB_RXCSR),
979 			musb_ep->dma->actual_len, request);
980 
981 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
982 	defined(CONFIG_USB_UX500_DMA)
983 		/* Autoclear doesn't clear RxPktRdy for short packets */
984 		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
985 				|| (dma->actual_len
986 					& (musb_ep->packet_sz - 1))) {
987 			/* ack the read! */
988 			csr &= ~MUSB_RXCSR_RXPKTRDY;
989 			musb_writew(epio, MUSB_RXCSR, csr);
990 		}
991 
992 		/* incomplete, and not short? wait for next IN packet */
993 		if ((request->actual < request->length)
994 				&& (musb_ep->dma->actual_len
995 					== musb_ep->packet_sz)) {
996 			/* In the double buffer case, continue to unload the fifo
997 			 * if there is an Rx packet in the FIFO.
998 			 */
999 			csr = musb_readw(epio, MUSB_RXCSR);
1000 			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
1001 				hw_ep->rx_double_buffered)
1002 				goto exit;
1003 			return;
1004 		}
1005 #endif
1006 		musb_g_giveback(musb_ep, request, 0);
1007 		/*
1008 		 * In the giveback function the MUSB lock is
1009 		 * released and re-acquired after some time. During
1010 		 * this time period the INDEX register could get
1011 		 * changed by the gadget_queue function especially
1012 		 * on SMP systems. Reselect the INDEX to be sure
1013 		 * we are reading/modifying the right registers
1014 		 */
1015 		musb_ep_select(mbase, epnum);
1016 
1017 		req = next_request(musb_ep);
1018 		if (!req)
1019 			return;
1020 	}
1021 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
1022 	defined(CONFIG_USB_UX500_DMA)
1023 exit:
1024 #endif
1025 	/* Analyze request */
1026 	rxstate(musb, req);
1027 }
1028 
1029 /* ------------------------------------------------------------ */
1030 
1031 static int musb_gadget_enable(struct usb_ep *ep,
1032 			const struct usb_endpoint_descriptor *desc)
1033 {
1034 	unsigned long		flags;
1035 	struct musb_ep		*musb_ep;
1036 	struct musb_hw_ep	*hw_ep;
1037 	void __iomem		*regs;
1038 	struct musb		*musb;
1039 	void __iomem	*mbase;
1040 	u8		epnum;
1041 	u16		csr;
1042 	unsigned	tmp;
1043 	int		status = -EINVAL;
1044 
1045 	if (!ep || !desc)
1046 		return -EINVAL;
1047 
1048 	musb_ep = to_musb_ep(ep);
1049 	hw_ep = musb_ep->hw_ep;
1050 	regs = hw_ep->regs;
1051 	musb = musb_ep->musb;
1052 	mbase = musb->mregs;
1053 	epnum = musb_ep->current_epnum;
1054 
1055 	spin_lock_irqsave(&musb->lock, flags);
1056 
1057 	if (musb_ep->desc) {
1058 		status = -EBUSY;
1059 		goto fail;
1060 	}
1061 	musb_ep->type = usb_endpoint_type(desc);
1062 
1063 	/* check direction and (later) maxpacket size against endpoint */
1064 	if (usb_endpoint_num(desc) != epnum)
1065 		goto fail;
1066 
1067 	/* REVISIT this rules out high bandwidth periodic transfers */
1068 	tmp = usb_endpoint_maxp(desc);
1069 	if (tmp & ~0x07ff) {
1070 		int ok;
1071 
1072 		if (usb_endpoint_dir_in(desc))
1073 			ok = musb->hb_iso_tx;
1074 		else
1075 			ok = musb->hb_iso_rx;
1076 
1077 		if (!ok) {
1078 			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1079 			goto fail;
1080 		}
1081 		musb_ep->hb_mult = (tmp >> 11) & 3;
1082 	} else {
1083 		musb_ep->hb_mult = 0;
1084 	}
1085 
1086 	musb_ep->packet_sz = tmp & 0x7ff;
1087 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1088 
1089 	/* enable the interrupts for the endpoint, set the endpoint
1090 	 * packet size (or fail), set the mode, clear the fifo
1091 	 */
1092 	musb_ep_select(mbase, epnum);
1093 	if (usb_endpoint_dir_in(desc)) {
1094 		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1095 
1096 		if (hw_ep->is_shared_fifo)
1097 			musb_ep->is_in = 1;
1098 		if (!musb_ep->is_in)
1099 			goto fail;
1100 
1101 		if (tmp > hw_ep->max_packet_sz_tx) {
1102 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1103 			goto fail;
1104 		}
1105 
1106 		int_txe |= (1 << epnum);
1107 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
1108 
1109 		/* REVISIT if can_bulk_split(), use by updating "tmp";
1110 		 * likewise high bandwidth periodic tx
1111 		 */
1112 		/* Set TXMAXP with the FIFO size of the endpoint
1113 		 * to disable double buffering mode.
1114 		 */
1115 		if (musb->double_buffer_not_ok) {
1116 			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1117 		} else {
1118 			if (can_bulk_split(musb, musb_ep->type))
1119 				musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
1120 							musb_ep->packet_sz) - 1;
1121 			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1122 					| (musb_ep->hb_mult << 11));
1123 		}
1124 
1125 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1126 		if (musb_readw(regs, MUSB_TXCSR)
1127 				& MUSB_TXCSR_FIFONOTEMPTY)
1128 			csr |= MUSB_TXCSR_FLUSHFIFO;
1129 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1130 			csr |= MUSB_TXCSR_P_ISO;
1131 
1132 		/* set twice in case of double buffering */
1133 		musb_writew(regs, MUSB_TXCSR, csr);
1134 		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1135 		musb_writew(regs, MUSB_TXCSR, csr);
1136 
1137 	} else {
1138 		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1139 
1140 		if (hw_ep->is_shared_fifo)
1141 			musb_ep->is_in = 0;
1142 		if (musb_ep->is_in)
1143 			goto fail;
1144 
1145 		if (tmp > hw_ep->max_packet_sz_rx) {
1146 			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1147 			goto fail;
1148 		}
1149 
1150 		int_rxe |= (1 << epnum);
1151 		musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1152 
1153 		/* REVISIT if can_bulk_combine() use by updating "tmp"
1154 		 * likewise high bandwidth periodic rx
1155 		 */
1156 		/* Set RXMAXP with the FIFO size of the endpoint
1157 		 * to disable double buffering mode.
1158 		 */
1159 		if (musb->double_buffer_not_ok)
1160 			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1161 		else
1162 			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1163 					| (musb_ep->hb_mult << 11));
1164 
1165 		/* force shared fifo to OUT-only mode */
1166 		if (hw_ep->is_shared_fifo) {
1167 			csr = musb_readw(regs, MUSB_TXCSR);
1168 			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1169 			musb_writew(regs, MUSB_TXCSR, csr);
1170 		}
1171 
1172 		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1173 		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1174 			csr |= MUSB_RXCSR_P_ISO;
1175 		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1176 			csr |= MUSB_RXCSR_DISNYET;
1177 
1178 		/* set twice in case of double buffering */
1179 		musb_writew(regs, MUSB_RXCSR, csr);
1180 		musb_writew(regs, MUSB_RXCSR, csr);
1181 	}
1182 
1183 	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
1184 	 * for some reason you run out of channels here.
1185 	 */
1186 	if (is_dma_capable() && musb->dma_controller) {
1187 		struct dma_controller	*c = musb->dma_controller;
1188 
1189 		musb_ep->dma = c->channel_alloc(c, hw_ep,
1190 				(desc->bEndpointAddress & USB_DIR_IN));
1191 	} else
1192 		musb_ep->dma = NULL;
1193 
1194 	musb_ep->desc = desc;
1195 	musb_ep->busy = 0;
1196 	musb_ep->wedged = 0;
1197 	status = 0;
1198 
1199 	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1200 			musb_driver_name, musb_ep->end_point.name,
1201 			({ char *s; switch (musb_ep->type) {
1202 			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
1203 			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
1204 			default:			s = "iso"; break;
1205 			}; s; }),
1206 			musb_ep->is_in ? "IN" : "OUT",
1207 			musb_ep->dma ? "dma, " : "",
1208 			musb_ep->packet_sz);
1209 
1210 	schedule_work(&musb->irq_work);
1211 
1212 fail:
1213 	spin_unlock_irqrestore(&musb->lock, flags);
1214 	return status;
1215 }
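
/*
 * Illustrative sketch, not part of this file: musb_gadget_enable() is
 * reached through musb_ep_ops.enable when a function driver enables an
 * endpoint it claimed at bind time, roughly (bulk_in_desc is a
 * hypothetical descriptor owned by that driver):
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);	// bind time
 *	...
 *	ret = usb_ep_enable(ep);	// set_alt time, ep->desc already chosen
 *
 * usb_ep_autoconfig() fills in bEndpointAddress to match a hardware
 * endpoint, and usb_ep_enable() then lands here with that descriptor.
 */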
1216 
1217 /*
1218  * Disable an endpoint, flushing all queued requests.
1219  */
1220 static int musb_gadget_disable(struct usb_ep *ep)
1221 {
1222 	unsigned long	flags;
1223 	struct musb	*musb;
1224 	u8		epnum;
1225 	struct musb_ep	*musb_ep;
1226 	void __iomem	*epio;
1227 	int		status = 0;
1228 
1229 	musb_ep = to_musb_ep(ep);
1230 	musb = musb_ep->musb;
1231 	epnum = musb_ep->current_epnum;
1232 	epio = musb->endpoints[epnum].regs;
1233 
1234 	spin_lock_irqsave(&musb->lock, flags);
1235 	musb_ep_select(musb->mregs, epnum);
1236 
1237 	/* zero the endpoint sizes */
1238 	if (musb_ep->is_in) {
1239 		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1240 		int_txe &= ~(1 << epnum);
1241 		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1242 		musb_writew(epio, MUSB_TXMAXP, 0);
1243 	} else {
1244 		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1245 		int_rxe &= ~(1 << epnum);
1246 		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1247 		musb_writew(epio, MUSB_RXMAXP, 0);
1248 	}
1249 
1250 	musb_ep->desc = NULL;
1251 	musb_ep->end_point.desc = NULL;
1252 
1253 	/* abort all pending DMA and requests */
1254 	nuke(musb_ep, -ESHUTDOWN);
1255 
1256 	schedule_work(&musb->irq_work);
1257 
1258 	spin_unlock_irqrestore(&(musb->lock), flags);
1259 
1260 	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1261 
1262 	return status;
1263 }
1264 
1265 /*
1266  * Allocate a request for an endpoint.
1267  * Reused by ep0 code.
1268  */
1269 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1270 {
1271 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1272 	struct musb		*musb = musb_ep->musb;
1273 	struct musb_request	*request = NULL;
1274 
1275 	request = kzalloc(sizeof *request, gfp_flags);
1276 	if (!request) {
1277 		dev_dbg(musb->controller, "not enough memory\n");
1278 		return NULL;
1279 	}
1280 
1281 	request->request.dma = DMA_ADDR_INVALID;
1282 	request->epnum = musb_ep->current_epnum;
1283 	request->ep = musb_ep;
1284 
1285 	return &request->request;
1286 }
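
/*
 * Illustrative usage sketch (gadget-driver side, hypothetical names):
 * requests allocated here arrive via usb_ep_alloc_request(), are filled
 * in by the function driver, and come back through musb_gadget_queue():
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->buf = buffer;		// driver-owned buffer
 *	req->length = len;
 *	req->complete = my_complete;	// completion callback
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */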
1287 
1288 /*
1289  * Free a request
1290  * Reused by ep0 code.
1291  */
1292 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1293 {
1294 	kfree(to_musb_request(req));
1295 }
1296 
1297 static LIST_HEAD(buffers);
1298 
1299 struct free_record {
1300 	struct list_head	list;
1301 	struct device		*dev;
1302 	unsigned		bytes;
1303 	dma_addr_t		dma;
1304 };
1305 
1306 /*
1307  * Context: controller locked, IRQs blocked.
1308  */
1309 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1310 {
1311 	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1312 		req->tx ? "TX/IN" : "RX/OUT",
1313 		&req->request, req->request.length, req->epnum);
1314 
1315 	musb_ep_select(musb->mregs, req->epnum);
1316 	if (req->tx)
1317 		txstate(musb, req);
1318 	else
1319 		rxstate(musb, req);
1320 }
1321 
1322 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1323 			gfp_t gfp_flags)
1324 {
1325 	struct musb_ep		*musb_ep;
1326 	struct musb_request	*request;
1327 	struct musb		*musb;
1328 	int			status = 0;
1329 	unsigned long		lockflags;
1330 
1331 	if (!ep || !req)
1332 		return -EINVAL;
1333 	if (!req->buf)
1334 		return -ENODATA;
1335 
1336 	musb_ep = to_musb_ep(ep);
1337 	musb = musb_ep->musb;
1338 
1339 	request = to_musb_request(req);
1340 	request->musb = musb;
1341 
1342 	if (request->ep != musb_ep)
1343 		return -EINVAL;
1344 
1345 	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1346 
1347 	/* request is mine now... */
1348 	request->request.actual = 0;
1349 	request->request.status = -EINPROGRESS;
1350 	request->epnum = musb_ep->current_epnum;
1351 	request->tx = musb_ep->is_in;
1352 
1353 	map_dma_buffer(request, musb, musb_ep);
1354 
1355 	spin_lock_irqsave(&musb->lock, lockflags);
1356 
1357 	/* don't queue if the ep is down */
1358 	if (!musb_ep->desc) {
1359 		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1360 				req, ep->name, "disabled");
1361 		status = -ESHUTDOWN;
1362 		goto cleanup;
1363 	}
1364 
1365 	/* add request to the list */
1366 	list_add_tail(&request->list, &musb_ep->req_list);
1367 
1368 	/* if this is the head of the queue, start i/o ... */
1369 	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1370 		musb_ep_restart(musb, request);
1371 
1372 cleanup:
1373 	spin_unlock_irqrestore(&musb->lock, lockflags);
1374 	return status;
1375 }
1376 
1377 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1378 {
1379 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1380 	struct musb_request	*req = to_musb_request(request);
1381 	struct musb_request	*r;
1382 	unsigned long		flags;
1383 	int			status = 0;
1384 	struct musb		*musb = musb_ep->musb;
1385 
1386 	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1387 		return -EINVAL;
1388 
1389 	spin_lock_irqsave(&musb->lock, flags);
1390 
1391 	list_for_each_entry(r, &musb_ep->req_list, list) {
1392 		if (r == req)
1393 			break;
1394 	}
1395 	if (r != req) {
1396 		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1397 		status = -EINVAL;
1398 		goto done;
1399 	}
1400 
1401 	/* if the hardware doesn't have the request, easy ... */
1402 	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1403 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1404 
1405 	/* ... else abort the dma transfer ... */
1406 	else if (is_dma_capable() && musb_ep->dma) {
1407 		struct dma_controller	*c = musb->dma_controller;
1408 
1409 		musb_ep_select(musb->mregs, musb_ep->current_epnum);
1410 		if (c->channel_abort)
1411 			status = c->channel_abort(musb_ep->dma);
1412 		else
1413 			status = -EBUSY;
1414 		if (status == 0)
1415 			musb_g_giveback(musb_ep, request, -ECONNRESET);
1416 	} else {
1417 		/* NOTE: by sticking to easily tested hardware/driver states,
1418 		 * we leave counting of in-flight packets imprecise.
1419 		 */
1420 		musb_g_giveback(musb_ep, request, -ECONNRESET);
1421 	}
1422 
1423 done:
1424 	spin_unlock_irqrestore(&musb->lock, flags);
1425 	return status;
1426 }
1427 
1428 /*
1429  * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1430  * data but will queue requests.
1431  *
1432  * exported to ep0 code
1433  */
1434 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1435 {
1436 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1437 	u8			epnum = musb_ep->current_epnum;
1438 	struct musb		*musb = musb_ep->musb;
1439 	void __iomem		*epio = musb->endpoints[epnum].regs;
1440 	void __iomem		*mbase;
1441 	unsigned long		flags;
1442 	u16			csr;
1443 	struct musb_request	*request;
1444 	int			status = 0;
1445 
1446 	if (!ep)
1447 		return -EINVAL;
1448 	mbase = musb->mregs;
1449 
1450 	spin_lock_irqsave(&musb->lock, flags);
1451 
1452 	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1453 		status = -EINVAL;
1454 		goto done;
1455 	}
1456 
1457 	musb_ep_select(mbase, epnum);
1458 
1459 	request = next_request(musb_ep);
1460 	if (value) {
1461 		if (request) {
1462 			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1463 			    ep->name);
1464 			status = -EAGAIN;
1465 			goto done;
1466 		}
1467 		/* Cannot portably stall with non-empty FIFO */
1468 		if (musb_ep->is_in) {
1469 			csr = musb_readw(epio, MUSB_TXCSR);
1470 			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1471 				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1472 				status = -EAGAIN;
1473 				goto done;
1474 			}
1475 		}
1476 	} else
1477 		musb_ep->wedged = 0;
1478 
1479 	/* set/clear the stall and toggle bits */
1480 	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1481 	if (musb_ep->is_in) {
1482 		csr = musb_readw(epio, MUSB_TXCSR);
1483 		csr |= MUSB_TXCSR_P_WZC_BITS
1484 			| MUSB_TXCSR_CLRDATATOG;
1485 		if (value)
1486 			csr |= MUSB_TXCSR_P_SENDSTALL;
1487 		else
1488 			csr &= ~(MUSB_TXCSR_P_SENDSTALL
1489 				| MUSB_TXCSR_P_SENTSTALL);
1490 		csr &= ~MUSB_TXCSR_TXPKTRDY;
1491 		musb_writew(epio, MUSB_TXCSR, csr);
1492 	} else {
1493 		csr = musb_readw(epio, MUSB_RXCSR);
1494 		csr |= MUSB_RXCSR_P_WZC_BITS
1495 			| MUSB_RXCSR_FLUSHFIFO
1496 			| MUSB_RXCSR_CLRDATATOG;
1497 		if (value)
1498 			csr |= MUSB_RXCSR_P_SENDSTALL;
1499 		else
1500 			csr &= ~(MUSB_RXCSR_P_SENDSTALL
1501 				| MUSB_RXCSR_P_SENTSTALL);
1502 		musb_writew(epio, MUSB_RXCSR, csr);
1503 	}
1504 
1505 	/* maybe start the first request in the queue */
1506 	if (!musb_ep->busy && !value && request) {
1507 		dev_dbg(musb->controller, "restarting the request\n");
1508 		musb_ep_restart(musb, request);
1509 	}
1510 
1511 done:
1512 	spin_unlock_irqrestore(&musb->lock, flags);
1513 	return status;
1514 }
1515 
1516 /*
1517  * Sets the halt feature, with clear-halt requests ignored
1518  */
1519 static int musb_gadget_set_wedge(struct usb_ep *ep)
1520 {
1521 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1522 
1523 	if (!ep)
1524 		return -EINVAL;
1525 
1526 	musb_ep->wedged = 1;
1527 
1528 	return usb_ep_set_halt(ep);
1529 }
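
/*
 * Illustrative sketch: a function driver wedges an endpoint, rather than
 * merely halting it, when the host must do more than CLEAR_FEATURE(HALT)
 * before traffic may resume (mass-storage style error handling, for
 * example).  bulk_in_ep is hypothetical:
 *
 *	usb_ep_set_wedge(bulk_in_ep);
 *
 * usb_ep_set_wedge() reaches this function through musb_ep_ops.set_wedge.
 */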
1530 
1531 static int musb_gadget_fifo_status(struct usb_ep *ep)
1532 {
1533 	struct musb_ep		*musb_ep = to_musb_ep(ep);
1534 	void __iomem		*epio = musb_ep->hw_ep->regs;
1535 	int			retval = -EINVAL;
1536 
1537 	if (musb_ep->desc && !musb_ep->is_in) {
1538 		struct musb		*musb = musb_ep->musb;
1539 		int			epnum = musb_ep->current_epnum;
1540 		void __iomem		*mbase = musb->mregs;
1541 		unsigned long		flags;
1542 
1543 		spin_lock_irqsave(&musb->lock, flags);
1544 
1545 		musb_ep_select(mbase, epnum);
1546 		/* FIXME return zero unless RXPKTRDY is set */
1547 		retval = musb_readw(epio, MUSB_RXCOUNT);
1548 
1549 		spin_unlock_irqrestore(&musb->lock, flags);
1550 	}
1551 	return retval;
1552 }
1553 
1554 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1555 {
1556 	struct musb_ep	*musb_ep = to_musb_ep(ep);
1557 	struct musb	*musb = musb_ep->musb;
1558 	u8		epnum = musb_ep->current_epnum;
1559 	void __iomem	*epio = musb->endpoints[epnum].regs;
1560 	void __iomem	*mbase;
1561 	unsigned long	flags;
1562 	u16		csr, int_txe;
1563 
1564 	mbase = musb->mregs;
1565 
1566 	spin_lock_irqsave(&musb->lock, flags);
1567 	musb_ep_select(mbase, (u8) epnum);
1568 
1569 	/* disable interrupts */
1570 	int_txe = musb_readw(mbase, MUSB_INTRTXE);
1571 	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1572 
1573 	if (musb_ep->is_in) {
1574 		csr = musb_readw(epio, MUSB_TXCSR);
1575 		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1576 			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1577 			/*
1578 			 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1579 			 * interrupt the current FIFO loading, but not flush
1580 			 * the already loaded ones.
1581 			 */
1582 			csr &= ~MUSB_TXCSR_TXPKTRDY;
1583 			musb_writew(epio, MUSB_TXCSR, csr);
1584 			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1585 			musb_writew(epio, MUSB_TXCSR, csr);
1586 		}
1587 	} else {
1588 		csr = musb_readw(epio, MUSB_RXCSR);
1589 		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1590 		musb_writew(epio, MUSB_RXCSR, csr);
1591 		musb_writew(epio, MUSB_RXCSR, csr);
1592 	}
1593 
1594 	/* re-enable interrupt */
1595 	musb_writew(mbase, MUSB_INTRTXE, int_txe);
1596 	spin_unlock_irqrestore(&musb->lock, flags);
1597 }
1598 
1599 static const struct usb_ep_ops musb_ep_ops = {
1600 	.enable		= musb_gadget_enable,
1601 	.disable	= musb_gadget_disable,
1602 	.alloc_request	= musb_alloc_request,
1603 	.free_request	= musb_free_request,
1604 	.queue		= musb_gadget_queue,
1605 	.dequeue	= musb_gadget_dequeue,
1606 	.set_halt	= musb_gadget_set_halt,
1607 	.set_wedge	= musb_gadget_set_wedge,
1608 	.fifo_status	= musb_gadget_fifo_status,
1609 	.fifo_flush	= musb_gadget_fifo_flush
1610 };
1611 
1612 /* ----------------------------------------------------------------------- */
1613 
1614 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1615 {
1616 	struct musb	*musb = gadget_to_musb(gadget);
1617 
1618 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
1619 }
1620 
1621 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1622 {
1623 	struct musb	*musb = gadget_to_musb(gadget);
1624 	void __iomem	*mregs = musb->mregs;
1625 	unsigned long	flags;
1626 	int		status = -EINVAL;
1627 	u8		power, devctl;
1628 	int		retries;
1629 
1630 	spin_lock_irqsave(&musb->lock, flags);
1631 
1632 	switch (musb->xceiv->state) {
1633 	case OTG_STATE_B_PERIPHERAL:
1634 		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
1635 		 * that's part of the standard usb 1.1 state machine, and
1636 		 * doesn't affect OTG transitions.
1637 		 */
1638 		if (musb->may_wakeup && musb->is_suspended)
1639 			break;
1640 		goto done;
1641 	case OTG_STATE_B_IDLE:
1642 		/* Start SRP ... OTG not required. */
1643 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1644 		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1645 		devctl |= MUSB_DEVCTL_SESSION;
1646 		musb_writeb(mregs, MUSB_DEVCTL, devctl);
1647 		devctl = musb_readb(mregs, MUSB_DEVCTL);
1648 		retries = 100;
1649 		while (!(devctl & MUSB_DEVCTL_SESSION)) {
1650 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1651 			if (retries-- < 1)
1652 				break;
1653 		}
1654 		retries = 10000;
1655 		while (devctl & MUSB_DEVCTL_SESSION) {
1656 			devctl = musb_readb(mregs, MUSB_DEVCTL);
1657 			if (retries-- < 1)
1658 				break;
1659 		}
1660 
1661 		spin_unlock_irqrestore(&musb->lock, flags);
1662 		otg_start_srp(musb->xceiv->otg);
1663 		spin_lock_irqsave(&musb->lock, flags);
1664 
1665 		/* Block idling for at least 1s */
1666 		musb_platform_try_idle(musb,
1667 			jiffies + msecs_to_jiffies(1 * HZ));
1668 
1669 		status = 0;
1670 		goto done;
1671 	default:
1672 		dev_dbg(musb->controller, "Unhandled wake: %s\n",
1673 			otg_state_string(musb->xceiv->state));
1674 		goto done;
1675 	}
1676 
1677 	status = 0;
1678 
1679 	power = musb_readb(mregs, MUSB_POWER);
1680 	power |= MUSB_POWER_RESUME;
1681 	musb_writeb(mregs, MUSB_POWER, power);
1682 	dev_dbg(musb->controller, "issue wakeup\n");
1683 
1684 	/* FIXME do this next chunk in a timer callback, no udelay */
1685 	mdelay(2);
1686 
1687 	power = musb_readb(mregs, MUSB_POWER);
1688 	power &= ~MUSB_POWER_RESUME;
1689 	musb_writeb(mregs, MUSB_POWER, power);
1690 done:
1691 	spin_unlock_irqrestore(&musb->lock, flags);
1692 	return status;
1693 }
1694 
1695 static int
1696 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1697 {
1698 	struct musb	*musb = gadget_to_musb(gadget);
1699 
1700 	musb->is_self_powered = !!is_selfpowered;
1701 	return 0;
1702 }
1703 
1704 static void musb_pullup(struct musb *musb, int is_on)
1705 {
1706 	u8 power;
1707 
1708 	power = musb_readb(musb->mregs, MUSB_POWER);
1709 	if (is_on)
1710 		power |= MUSB_POWER_SOFTCONN;
1711 	else
1712 		power &= ~MUSB_POWER_SOFTCONN;
1713 
1714 	/* FIXME if on, HdrcStart; if off, HdrcStop */
1715 
1716 	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1717 		is_on ? "on" : "off");
1718 	musb_writeb(musb->mregs, MUSB_POWER, power);
1719 }
1720 
1721 #if 0
1722 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1723 {
1724 	dev_dbg(musb->controller, "<= %s =>\n", __func__);
1725 
1726 	/*
1727 	 * FIXME iff driver's softconnect flag is set (as it is during probe,
1728 	 * though that can clear it), just musb_pullup().
1729 	 */
1730 
1731 	return -EINVAL;
1732 }
1733 #endif
1734 
1735 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1736 {
1737 	struct musb	*musb = gadget_to_musb(gadget);
1738 
1739 	if (!musb->xceiv->set_power)
1740 		return -EOPNOTSUPP;
1741 	return usb_phy_set_power(musb->xceiv, mA);
1742 }
1743 
1744 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1745 {
1746 	struct musb	*musb = gadget_to_musb(gadget);
1747 	unsigned long	flags;
1748 
1749 	is_on = !!is_on;
1750 
1751 	pm_runtime_get_sync(musb->controller);
1752 
1753 	/* NOTE: this assumes we are sensing vbus; we'd rather
1754 	 * not pullup unless the B-session is active.
1755 	 */
1756 	spin_lock_irqsave(&musb->lock, flags);
1757 	if (is_on != musb->softconnect) {
1758 		musb->softconnect = is_on;
1759 		musb_pullup(musb, is_on);
1760 	}
1761 	spin_unlock_irqrestore(&musb->lock, flags);
1762 
1763 	pm_runtime_put(musb->controller);
1764 
1765 	return 0;
1766 }
1767 
1768 static int musb_gadget_start(struct usb_gadget *g,
1769 		struct usb_gadget_driver *driver);
1770 static int musb_gadget_stop(struct usb_gadget *g,
1771 		struct usb_gadget_driver *driver);
1772 
1773 static const struct usb_gadget_ops musb_gadget_operations = {
1774 	.get_frame		= musb_gadget_get_frame,
1775 	.wakeup			= musb_gadget_wakeup,
1776 	.set_selfpowered	= musb_gadget_set_self_powered,
1777 	/* .vbus_session		= musb_gadget_vbus_session, */
1778 	.vbus_draw		= musb_gadget_vbus_draw,
1779 	.pullup			= musb_gadget_pullup,
1780 	.udc_start		= musb_gadget_start,
1781 	.udc_stop		= musb_gadget_stop,
1782 };
1783 
1784 /* ----------------------------------------------------------------------- */
1785 
1786 /* Registration */
1787 
1788 /* Only this registration code "knows" the rule (from USB standards)
1789  * about there being only one external upstream port.  It assumes
1790  * all peripheral ports are external...
1791  */
1792 
1793 static void musb_gadget_release(struct device *dev)
1794 {
1795 	/* kref_put(WHAT) */
1796 	dev_dbg(dev, "%s\n", __func__);
1797 }
1798 
1799 
1800 static void __devinit
1801 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1802 {
1803 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1804 
1805 	memset(ep, 0, sizeof *ep);
1806 
1807 	ep->current_epnum = epnum;
1808 	ep->musb = musb;
1809 	ep->hw_ep = hw_ep;
1810 	ep->is_in = is_in;
1811 
1812 	INIT_LIST_HEAD(&ep->req_list);
1813 
1814 	sprintf(ep->name, "ep%d%s", epnum,
1815 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
1816 				is_in ? "in" : "out"));
1817 	ep->end_point.name = ep->name;
1818 	INIT_LIST_HEAD(&ep->end_point.ep_list);
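	/* ep0 is shared between IN and OUT, fixed at the 64-byte control
	 * maxpacket, and handled by its own ops; every other endpoint takes
	 * its maxpacket from the hardware FIFO size and is chained onto the
	 * gadget's ep_list for the gadget driver to claim.
	 */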
1819 	if (!epnum) {
1820 		ep->end_point.maxpacket = 64;
1821 		ep->end_point.ops = &musb_g_ep0_ops;
1822 		musb->g.ep0 = &ep->end_point;
1823 	} else {
1824 		if (is_in)
1825 			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1826 		else
1827 			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1828 		ep->end_point.ops = &musb_ep_ops;
1829 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1830 	}
1831 }
1832 
1833 /*
1834  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1835  * to the rest of the driver state.
1836  */
1837 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1838 {
1839 	u8			epnum;
1840 	struct musb_hw_ep	*hw_ep;
1841 	unsigned		count = 0;
1842 
1843 	/* initialize endpoint list just once */
1844 	INIT_LIST_HEAD(&(musb->g.ep_list));
1845 
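	/* A hardware endpoint with a shared FIFO yields one bidirectional
	 * gadget endpoint; otherwise separate IN and OUT endpoints are set
	 * up for whichever directions have a nonzero FIFO size.
	 */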
1846 	for (epnum = 0, hw_ep = musb->endpoints;
1847 			epnum < musb->nr_endpoints;
1848 			epnum++, hw_ep++) {
1849 		if (hw_ep->is_shared_fifo /* || !epnum */) {
1850 			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1851 			count++;
1852 		} else {
1853 			if (hw_ep->max_packet_sz_tx) {
1854 				init_peripheral_ep(musb, &hw_ep->ep_in,
1855 							epnum, 1);
1856 				count++;
1857 			}
1858 			if (hw_ep->max_packet_sz_rx) {
1859 				init_peripheral_ep(musb, &hw_ep->ep_out,
1860 							epnum, 0);
1861 				count++;
1862 			}
1863 		}
1864 	}
1865 }
1866 
1867 /* called once during driver setup to initialize and link into
1868  * the driver model; memory is zeroed.
1869  */
1870 int __devinit musb_gadget_setup(struct musb *musb)
1871 {
1872 	int status;
1873 
1874 	/* REVISIT minor race:  if (erroneously) setting up two
1875 	 * musb peripherals at the same time, only the bus lock
1876 	 * is probably held.
1877 	 */
1878 
1879 	musb->g.ops = &musb_gadget_operations;
1880 	musb->g.max_speed = USB_SPEED_HIGH;
1881 	musb->g.speed = USB_SPEED_UNKNOWN;
1882 
1883 	/* this "gadget" abstracts/virtualizes the controller */
1884 	dev_set_name(&musb->g.dev, "gadget");
1885 	musb->g.dev.parent = musb->controller;
1886 	musb->g.dev.dma_mask = musb->controller->dma_mask;
1887 	musb->g.dev.release = musb_gadget_release;
1888 	musb->g.name = musb_driver_name;
1889 
1890 	if (is_otg_enabled(musb))
1891 		musb->g.is_otg = 1;
1892 
1893 	musb_g_init_endpoints(musb);
1894 
1895 	musb->is_active = 0;
1896 	musb_platform_try_idle(musb, 0);
1897 
1898 	status = device_register(&musb->g.dev);
1899 	if (status != 0) {
1900 		put_device(&musb->g.dev);
1901 		return status;
1902 	}
1903 	status = usb_add_gadget_udc(musb->controller, &musb->g);
1904 	if (status)
1905 		goto err;
1906 
1907 	return 0;
1908 err:
1909 	musb->g.dev.parent = NULL;
1910 	device_unregister(&musb->g.dev);
1911 	return status;
1912 }
1913 
1914 void musb_gadget_cleanup(struct musb *musb)
1915 {
1916 	usb_del_gadget_udc(&musb->g);
1917 	if (musb->g.dev.parent)
1918 		device_unregister(&musb->g.dev);
1919 }
1920 
1921 /*
1922  * Register the gadget driver. Used by gadget drivers when
1923  * registering themselves with the controller.
1924  *
1925  * -EINVAL something went wrong (not driver)
1926  * -EBUSY another gadget is already using the controller
1927  * -ENOMEM no memory to perform the operation
1928  *
1929  * @param driver the gadget driver
1930  * @return <0 if error, 0 if everything is fine
1931  */
1932 static int musb_gadget_start(struct usb_gadget *g,
1933 		struct usb_gadget_driver *driver)
1934 {
1935 	struct musb		*musb = gadget_to_musb(g);
1936 	struct usb_otg		*otg = musb->xceiv->otg;
1937 	unsigned long		flags;
1938 	int			retval = -EINVAL;
1939 
1940 	if (driver->max_speed < USB_SPEED_HIGH)
1941 		goto err0;
1942 
1943 	pm_runtime_get_sync(musb->controller);
1944 
1945 	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1946 
1947 	musb->softconnect = 0;
1948 	musb->gadget_driver = driver;
1949 
1950 	spin_lock_irqsave(&musb->lock, flags);
1951 	musb->is_active = 1;
1952 
1953 	otg_set_peripheral(otg, &musb->g);
1954 	musb->xceiv->state = OTG_STATE_B_IDLE;
1955 
1956 	/*
1957 	 * FIXME this ignores the softconnect flag.  Drivers are
1958 	 * allowed to hold the peripheral inactive until, for example,
1959 	 * userspace hooks up printer hardware or DSP codecs, so
1960 	 * hosts only see fully functional devices.
1961 	 */
1962 
1963 	if (!is_otg_enabled(musb))
1964 		musb_start(musb);
1965 
1966 	spin_unlock_irqrestore(&musb->lock, flags);
1967 
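	/* In OTG mode the controller must also be able to act as a host
	 * (after HNP, or when the ID pin grounds), so register an hcd for
	 * the same port as well.
	 */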
1968 	if (is_otg_enabled(musb)) {
1969 		struct usb_hcd	*hcd = musb_to_hcd(musb);
1970 
1971 		dev_dbg(musb->controller, "OTG startup...\n");
1972 
1973 		/* REVISIT:  funcall to other code, which also
1974 		 * handles power budgeting ... this way also
1975 		 * ensures HdrcStart is indirectly called.
1976 		 */
1977 		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
1978 		if (retval < 0) {
1979 			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1980 			goto err2;
1981 		}
1982 
1983 		if ((musb->xceiv->last_event == USB_EVENT_ID)
1984 					&& otg->set_vbus)
1985 			otg_set_vbus(otg, 1);
1986 
1987 		hcd->self.uses_pio_for_control = 1;
1988 	}
1989 	if (musb->xceiv->last_event == USB_EVENT_NONE)
1990 		pm_runtime_put(musb->controller);
1991 
1992 	return 0;
1993 
1994 err2:
1995 	if (!is_otg_enabled(musb))
1996 		musb_stop(musb);
1997 err0:
1998 	return retval;
1999 }
2000 
2001 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
2002 {
2003 	int			i;
2004 	struct musb_hw_ep	*hw_ep;
2005 
2006 	/* don't disconnect if it's not connected */
2007 	if (musb->g.speed == USB_SPEED_UNKNOWN)
2008 		driver = NULL;
2009 	else
2010 		musb->g.speed = USB_SPEED_UNKNOWN;
2011 
2012 	/* deactivate the hardware */
2013 	if (musb->softconnect) {
2014 		musb->softconnect = 0;
2015 		musb_pullup(musb, 0);
2016 	}
2017 	musb_stop(musb);
2018 
2019 	/* killing any outstanding requests will quiesce the driver;
2020 	 * then report disconnect
2021 	 */
2022 	if (driver) {
2023 		for (i = 0, hw_ep = musb->endpoints;
2024 				i < musb->nr_endpoints;
2025 				i++, hw_ep++) {
2026 			musb_ep_select(musb->mregs, i);
2027 			if (hw_ep->is_shared_fifo /* || !epnum */) {
2028 				nuke(&hw_ep->ep_in, -ESHUTDOWN);
2029 			} else {
2030 				if (hw_ep->max_packet_sz_tx)
2031 					nuke(&hw_ep->ep_in, -ESHUTDOWN);
2032 				if (hw_ep->max_packet_sz_rx)
2033 					nuke(&hw_ep->ep_out, -ESHUTDOWN);
2034 			}
2035 		}
2036 	}
2037 }
2038 
2039 /*
2040  * Unregister the gadget driver. Used by gadget drivers when
2041  * unregistering themselves from the controller.
2042  *
2043  * @param driver the gadget driver to unregister
2044  */
2045 static int musb_gadget_stop(struct usb_gadget *g,
2046 		struct usb_gadget_driver *driver)
2047 {
2048 	struct musb	*musb = gadget_to_musb(g);
2049 	unsigned long	flags;
2050 
2051 	if (musb->xceiv->last_event == USB_EVENT_NONE)
2052 		pm_runtime_get_sync(musb->controller);
2053 
2054 	/*
2055 	 * REVISIT always use otg_set_peripheral() here too;
2056 	 * this needs to shut down the OTG engine.
2057 	 */
2058 
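	/* Teardown order: stop any HNP in progress, stop drawing VBUS,
	 * flush endpoint state and report disconnect, then detach from the
	 * transceiver; the hcd (if any) is removed after dropping the lock.
	 */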
2059 	spin_lock_irqsave(&musb->lock, flags);
2060 
2061 	musb_hnp_stop(musb);
2062 
2063 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2064 
2065 	musb->xceiv->state = OTG_STATE_UNDEFINED;
2066 	stop_activity(musb, driver);
2067 	otg_set_peripheral(musb->xceiv->otg, NULL);
2068 
2069 	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2070 
2071 	musb->is_active = 0;
2072 	musb_platform_try_idle(musb, 0);
2073 	spin_unlock_irqrestore(&musb->lock, flags);
2074 
2075 	if (is_otg_enabled(musb)) {
2076 		usb_remove_hcd(musb_to_hcd(musb));
2077 		/* FIXME we need to be able to register another
2078 		 * gadget driver here and have everything work;
2079 		 * that currently misbehaves.
2080 		 */
2081 	}
2082 
2083 	if (!is_otg_enabled(musb))
2084 		musb_stop(musb);
2085 
2086 	pm_runtime_put(musb->controller);
2087 
2088 	return 0;
2089 }
2090 
2091 /* ----------------------------------------------------------------------- */
2092 
2093 /* lifecycle operations called through plat_uds.c */
2094 
2095 void musb_g_resume(struct musb *musb)
2096 {
2097 	musb->is_suspended = 0;
2098 	switch (musb->xceiv->state) {
2099 	case OTG_STATE_B_IDLE:
2100 		break;
2101 	case OTG_STATE_B_WAIT_ACON:
2102 	case OTG_STATE_B_PERIPHERAL:
2103 		musb->is_active = 1;
2104 		if (musb->gadget_driver && musb->gadget_driver->resume) {
2105 			spin_unlock(&musb->lock);
2106 			musb->gadget_driver->resume(&musb->g);
2107 			spin_lock(&musb->lock);
2108 		}
2109 		break;
2110 	default:
2111 		WARNING("unhandled RESUME transition (%s)\n",
2112 				otg_state_string(musb->xceiv->state));
2113 	}
2114 }
2115 
2116 /* called when SOF packets stop for 3+ msec */
2117 void musb_g_suspend(struct musb *musb)
2118 {
2119 	u8	devctl;
2120 
2121 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2122 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2123 
2124 	switch (musb->xceiv->state) {
2125 	case OTG_STATE_B_IDLE:
2126 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2127 			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2128 		break;
2129 	case OTG_STATE_B_PERIPHERAL:
2130 		musb->is_suspended = 1;
2131 		if (musb->gadget_driver && musb->gadget_driver->suspend) {
2132 			spin_unlock(&musb->lock);
2133 			musb->gadget_driver->suspend(&musb->g);
2134 			spin_lock(&musb->lock);
2135 		}
2136 		break;
2137 	default:
2138 		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2139 		 * A_PERIPHERAL may need care too
2140 		 */
2141 		WARNING("unhandled SUSPEND transition (%s)\n",
2142 				otg_state_string(musb->xceiv->state));
2143 	}
2144 }
2145 
2146 /* Called during SRP */
2147 void musb_g_wakeup(struct musb *musb)
2148 {
2149 	musb_gadget_wakeup(&musb->g);
2150 }
2151 
2152 /* called when VBUS drops below session threshold, and in other cases */
2153 void musb_g_disconnect(struct musb *musb)
2154 {
2155 	void __iomem	*mregs = musb->mregs;
2156 	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
2157 
2158 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
2159 
2160 	/* clear HR (host request); leave only the session bit set */
2161 	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2162 
2163 	/* don't draw vbus until new b-default session */
2164 	(void) musb_gadget_vbus_draw(&musb->g, 0);
2165 
2166 	musb->g.speed = USB_SPEED_UNKNOWN;
2167 	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2168 		spin_unlock(&musb->lock);
2169 		musb->gadget_driver->disconnect(&musb->g);
2170 		spin_lock(&musb->lock);
2171 	}
2172 
2173 	switch (musb->xceiv->state) {
2174 	default:
2175 		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2176 			otg_state_string(musb->xceiv->state));
2177 		musb->xceiv->state = OTG_STATE_A_IDLE;
2178 		MUSB_HST_MODE(musb);
2179 		break;
2180 	case OTG_STATE_A_PERIPHERAL:
2181 		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2182 		MUSB_HST_MODE(musb);
2183 		break;
2184 	case OTG_STATE_B_WAIT_ACON:
2185 	case OTG_STATE_B_HOST:
2186 	case OTG_STATE_B_PERIPHERAL:
2187 	case OTG_STATE_B_IDLE:
2188 		musb->xceiv->state = OTG_STATE_B_IDLE;
2189 		break;
2190 	case OTG_STATE_B_SRP_INIT:
2191 		break;
2192 	}
2193 
2194 	musb->is_active = 0;
2195 }
2196 
2197 void musb_g_reset(struct musb *musb)
2198 __releases(musb->lock)
2199 __acquires(musb->lock)
2200 {
2201 	void __iomem	*mbase = musb->mregs;
2202 	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
2203 	u8		power;
2204 
2205 	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2206 			(devctl & MUSB_DEVCTL_BDEVICE)
2207 				? "B-Device" : "A-Device",
2208 			musb_readb(mbase, MUSB_FADDR),
2209 			musb->gadget_driver
2210 				? musb->gadget_driver->driver.name
2211 				: NULL
2212 			);
2213 
2214 	/* report disconnect, if we didn't already (flushing EP state) */
2215 	if (musb->g.speed != USB_SPEED_UNKNOWN)
2216 		musb_g_disconnect(musb);
2217 
2218 	/* clear HR (host request) */
2219 	else if (devctl & MUSB_DEVCTL_HR)
2220 		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2221 
2222 
2223 	/* what speed did we negotiate? */
2224 	power = musb_readb(mbase, MUSB_POWER);
2225 	musb->g.speed = (power & MUSB_POWER_HSMODE)
2226 			? USB_SPEED_HIGH : USB_SPEED_FULL;
2227 
2228 	/* start in USB_STATE_DEFAULT */
2229 	musb->is_active = 1;
2230 	musb->is_suspended = 0;
2231 	MUSB_DEV_MODE(musb);
2232 	musb->address = 0;
2233 	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2234 
2235 	musb->may_wakeup = 0;
2236 	musb->g.b_hnp_enable = 0;
2237 	musb->g.a_alt_hnp_support = 0;
2238 	musb->g.a_hnp_support = 0;
2239 
2240 	/* Normal reset, as B-Device;
2241 	 * or else after HNP, as A-Device
2242 	 */
2243 	if (devctl & MUSB_DEVCTL_BDEVICE) {
2244 		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2245 		musb->g.is_a_peripheral = 0;
2246 	} else if (is_otg_enabled(musb)) {
2247 		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2248 		musb->g.is_a_peripheral = 1;
2249 	} else
2250 		WARN_ON(1);
2251 
2252 	/* start with default limits on VBUS power draw */
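	/* (8 mA is the minimum VBUS budget the OTG supplement guarantees
	 * from an A-device; 100 mA is one unit load, the most a standard
	 * device may draw before it has been configured.)
	 */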
2253 	(void) musb_gadget_vbus_draw(&musb->g,
2254 			is_otg_enabled(musb) ? 8 : 100);
2255 }
2256