1 /*
2  * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
3  * Author: Chao Xie <chao.xie@marvell.com>
4  *	   Neil Zhang <zhangwm@marvell.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmapool.h>
16 #include <linux/kernel.h>
17 #include <linux/delay.h>
18 #include <linux/ioport.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/timer.h>
24 #include <linux/list.h>
25 #include <linux/interrupt.h>
26 #include <linux/moduleparam.h>
27 #include <linux/device.h>
28 #include <linux/usb/ch9.h>
29 #include <linux/usb/gadget.h>
30 #include <linux/usb/otg.h>
31 #include <linux/pm.h>
32 #include <linux/io.h>
33 #include <linux/irq.h>
34 #include <linux/platform_device.h>
35 #include <linux/clk.h>
36 #include <linux/platform_data/mv_usb.h>
37 #include <asm/unaligned.h>
38 
39 #include "mv_udc.h"
40 
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

/*
 * Direction of an endpoint: ep0 follows the direction of the current
 * control transfer (udc->ep0_dir); every other endpoint has a fixed
 * direction stored in ep->direction.
 */
#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000
#define DTD_TIMEOUT		1000

/*
 * Busy-wait loops poll once every LOOPS_USEC microseconds;
 * LOOPS(t) converts a timeout in usec into an iteration count.
 */
#define LOOPS_USEC_SHIFT	4
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

/* completed elsewhere in this driver (not in this chunk) on gadget release */
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc	*the_controller;
int mv_usb_otgsc;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};
79 
/*
 * ep0_reset() - re-initialize both ep0 endpoints (OUT and IN).
 *
 * Rebuilds the two ep0 queue heads (index 0 = OUT/RX, index 1 = IN/TX)
 * and enables both directions of endpoint 0 as control endpoints in
 * the EPCTRL0 register.
 */
static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		/* no dTD linked yet: terminate the next-pointer */
		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		/* enable the matching direction as a control endpoint */
		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}
116 
117 /* protocol ep0 stall, will automatically be cleared on new transaction */
ep0_stall(struct mv_udc * udc)118 static void ep0_stall(struct mv_udc *udc)
119 {
120 	u32	epctrlx;
121 
122 	/* set TX and RX to stall */
123 	epctrlx = readl(&udc->op_regs->epctrlx[0]);
124 	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
125 	writel(epctrlx, &udc->op_regs->epctrlx[0]);
126 
127 	/* update ep0 state */
128 	udc->ep0_state = WAIT_FOR_SETUP;
129 	udc->ep0_dir = EP_DIR_OUT;
130 }
131 
/*
 * process_ep_req() - examine the dTD chain of a (possibly) completed request.
 * @index:    queue-head index (ep_num * 2 + direction; even = OUT, odd = IN)
 * @curr_req: the request whose dTDs are inspected
 *
 * Returns 0 when the request completed successfully (req.actual updated),
 * 1 when a dTD is still active (request not finished yet), or a negative
 * errno when the hardware reported an error on a dTD.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	/* start from the full length, subtract what each dTD left over */
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* hardware still owns this dTD: request not complete */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* IN must transfer everything queued */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* short OUT packet: normal early end */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	/* epstatus bit for this endpoint: OUT in bits 0-15, IN in 16-31 */
	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * Wait until the controller has moved past the last dTD of this
	 * request before retiring it.
	 * NOTE(review): these busy-wait loops have no timeout -- presumably
	 * the hardware always advances; verify against the controller spec.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
212 
213 /*
214  * done() - retire a request; caller blocked irqs
215  * @status : request status to be set, only works when
216  * request is still in progress.
217  */
done(struct mv_ep * ep,struct mv_req * req,int status)218 static void done(struct mv_ep *ep, struct mv_req *req, int status)
219 {
220 	struct mv_udc *udc = NULL;
221 	unsigned char stopped = ep->stopped;
222 	struct mv_dtd *curr_td, *next_td;
223 	int j;
224 
225 	udc = (struct mv_udc *)ep->udc;
226 	/* Removed the req from fsl_ep->queue */
227 	list_del_init(&req->queue);
228 
229 	/* req.status should be set as -EINPROGRESS in ep_queue() */
230 	if (req->req.status == -EINPROGRESS)
231 		req->req.status = status;
232 	else
233 		status = req->req.status;
234 
235 	/* Free dtd for the request */
236 	next_td = req->head;
237 	for (j = 0; j < req->dtd_count; j++) {
238 		curr_td = next_td;
239 		if (j != req->dtd_count - 1)
240 			next_td = curr_td->next_dtd_virt;
241 		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
242 	}
243 
244 	if (req->mapped) {
245 		dma_unmap_single(ep->udc->gadget.dev.parent,
246 			req->req.dma, req->req.length,
247 			((ep_dir(ep) == EP_DIR_IN) ?
248 				DMA_TO_DEVICE : DMA_FROM_DEVICE));
249 		req->req.dma = DMA_ADDR_INVALID;
250 		req->mapped = 0;
251 	} else
252 		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
253 			req->req.dma, req->req.length,
254 			((ep_dir(ep) == EP_DIR_IN) ?
255 				DMA_TO_DEVICE : DMA_FROM_DEVICE));
256 
257 	if (status && (status != -ESHUTDOWN))
258 		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
259 			ep->ep.name, &req->req, status,
260 			req->req.actual, req->req.length);
261 
262 	ep->stopped = 1;
263 
264 	spin_unlock(&ep->udc->lock);
265 	/*
266 	 * complete() is from gadget layer,
267 	 * eg fsg->bulk_in_complete()
268 	 */
269 	if (req->req.complete)
270 		req->req.complete(&ep->ep, &req->req);
271 
272 	spin_lock(&ep->udc->lock);
273 	ep->stopped = stopped;
274 }
275 
/*
 * queue_dtd() - hand a built dTD chain to the hardware.
 *
 * If the endpoint queue is non-empty the new chain is linked after the
 * last queued request; the ATDTW (add-dTD tripwire) semaphore protocol
 * is then used to safely check whether the endpoint still needs to be
 * primed.  If the queue was empty the queue head is pointed at the new
 * chain and the endpoint is primed directly.
 *
 * Caller holds udc->lock.  Returns 0 on success or -ETIME if the
 * tripwire never latched.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* queue heads are laid out in (ep_num, direction) pairs */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		/* append the new chain behind the currently last dTD */
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		/* already primed: the hardware will pick up the link */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint was active during the window: no re-prime needed */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occure before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
358 
359 
/*
 * build_dtd() - allocate and fill one dTD for (part of) a request.
 * @length:  out, number of bytes this dTD will transfer
 * @dma:     out, DMA address of the new dTD
 * @is_last: out, non-zero when this dTD finishes the request
 *
 * Advances req->req.actual by *length.  Returns NULL on allocation
 * failure.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_udc *udc = req->ep->udc;
	struct mv_dtd *dtd;
	u32 size_sts;
	u32 buf;

	/* cap this dTD at the controller's per-dTD transfer limit */
	*length = min(req->req.length - req->req.actual,
			(unsigned)EP_MAX_LENGTH_TRANSFER);

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
	if (!dtd)
		return NULL;

	dtd->td_dma = *dma;

	/*
	 * Buffer page pointers: pointer 0 keeps the in-page offset,
	 * the remaining four are the following 4 KiB page boundaries.
	 */
	buf = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(buf);
	buf &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(buf + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(buf + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(buf + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(buf + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero)
		/* a zero or short final packet terminates the transfer */
		*is_last = (*length == 0
			|| (*length % req->ep->ep.maxpacket) != 0);
	else
		*is_last = (req->req.length == req->req.actual);

	/* Fill in the transfer size; set active bit */
	size_sts = (*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE;

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		size_sts |= DTD_IOC;

	dtd->size_ioc_sts = size_sts;

	mb();

	return dtd;
}
417 
418 /* generate dTD linked list for a request */
req_to_dtd(struct mv_req * req)419 static int req_to_dtd(struct mv_req *req)
420 {
421 	unsigned count;
422 	int is_last, is_first = 1;
423 	struct mv_dtd *dtd, *last_dtd = NULL;
424 	struct mv_udc *udc;
425 	dma_addr_t dma;
426 
427 	udc = req->ep->udc;
428 
429 	do {
430 		dtd = build_dtd(req, &count, &dma, &is_last);
431 		if (dtd == NULL)
432 			return -ENOMEM;
433 
434 		if (is_first) {
435 			is_first = 0;
436 			req->head = dtd;
437 		} else {
438 			last_dtd->dtd_next = dma;
439 			last_dtd->next_dtd_virt = dtd;
440 		}
441 		last_dtd = dtd;
442 		req->dtd_count++;
443 	} while (!is_last);
444 
445 	/* set terminate bit to 1 for the last dTD */
446 	dtd->dtd_next = DTD_NEXT_TERMINATE;
447 
448 	req->tail = dtd;
449 
450 	return 0;
451 }
452 
mv_ep_enable(struct usb_ep * _ep,const struct usb_endpoint_descriptor * desc)453 static int mv_ep_enable(struct usb_ep *_ep,
454 		const struct usb_endpoint_descriptor *desc)
455 {
456 	struct mv_udc *udc;
457 	struct mv_ep *ep;
458 	struct mv_dqh *dqh;
459 	u16 max = 0;
460 	u32 bit_pos, epctrlx, direction;
461 	unsigned char zlt = 0, ios = 0, mult = 0;
462 	unsigned long flags;
463 
464 	ep = container_of(_ep, struct mv_ep, ep);
465 	udc = ep->udc;
466 
467 	if (!_ep || !desc || ep->desc
468 			|| desc->bDescriptorType != USB_DT_ENDPOINT)
469 		return -EINVAL;
470 
471 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
472 		return -ESHUTDOWN;
473 
474 	direction = ep_dir(ep);
475 	max = usb_endpoint_maxp(desc);
476 
477 	/*
478 	 * disable HW zero length termination select
479 	 * driver handles zero length packet through req->req.zero
480 	 */
481 	zlt = 1;
482 
483 	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
484 
485 	/* Check if the Endpoint is Primed */
486 	if ((readl(&udc->op_regs->epprime) & bit_pos)
487 		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
488 		dev_info(&udc->dev->dev,
489 			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
490 			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
491 			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
492 			(unsigned)readl(&udc->op_regs->epprime),
493 			(unsigned)readl(&udc->op_regs->epstatus),
494 			(unsigned)bit_pos);
495 		goto en_done;
496 	}
497 	/* Set the max packet length, interrupt on Setup and Mult fields */
498 	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
499 	case USB_ENDPOINT_XFER_BULK:
500 		zlt = 1;
501 		mult = 0;
502 		break;
503 	case USB_ENDPOINT_XFER_CONTROL:
504 		ios = 1;
505 	case USB_ENDPOINT_XFER_INT:
506 		mult = 0;
507 		break;
508 	case USB_ENDPOINT_XFER_ISOC:
509 		/* Calculate transactions needed for high bandwidth iso */
510 		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
511 		max = max & 0x7ff;	/* bit 0~10 */
512 		/* 3 transactions at most */
513 		if (mult > 3)
514 			goto en_done;
515 		break;
516 	default:
517 		goto en_done;
518 	}
519 
520 	spin_lock_irqsave(&udc->lock, flags);
521 	/* Get the endpoint queue head address */
522 	dqh = ep->dqh;
523 	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
524 		| (mult << EP_QUEUE_HEAD_MULT_POS)
525 		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
526 		| (ios ? EP_QUEUE_HEAD_IOS : 0);
527 	dqh->next_dtd_ptr = 1;
528 	dqh->size_ioc_int_sts = 0;
529 
530 	ep->ep.maxpacket = max;
531 	ep->desc = desc;
532 	ep->stopped = 0;
533 
534 	/* Enable the endpoint for Rx or Tx and set the endpoint type */
535 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
536 	if (direction == EP_DIR_IN) {
537 		epctrlx &= ~EPCTRL_TX_ALL_MASK;
538 		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
539 			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
540 				<< EPCTRL_TX_EP_TYPE_SHIFT);
541 	} else {
542 		epctrlx &= ~EPCTRL_RX_ALL_MASK;
543 		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
544 			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
545 				<< EPCTRL_RX_EP_TYPE_SHIFT);
546 	}
547 	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
548 
549 	/*
550 	 * Implement Guideline (GL# USB-7) The unused endpoint type must
551 	 * be programmed to bulk.
552 	 */
553 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
554 	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
555 		epctrlx |= (USB_ENDPOINT_XFER_BULK
556 				<< EPCTRL_RX_EP_TYPE_SHIFT);
557 		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
558 	}
559 
560 	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
561 	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
562 		epctrlx |= (USB_ENDPOINT_XFER_BULK
563 				<< EPCTRL_TX_EP_TYPE_SHIFT);
564 		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
565 	}
566 
567 	spin_unlock_irqrestore(&udc->lock, flags);
568 
569 	return 0;
570 en_done:
571 	return -EINVAL;
572 }
573 
/*
 * mv_ep_disable() - disable an endpoint (usb_ep_ops.disable).
 *
 * Clears the queue head, disables the endpoint's direction in EPCTRLx,
 * retires all pending requests with -ESHUTDOWN and marks the endpoint
 * stopped.  Returns -EINVAL if the endpoint was not enabled.
 */
static int  mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 bit_pos, epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	/* mark the endpoint unconfigured */
	ep->desc = NULL;
	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
617 
618 static struct usb_request *
mv_alloc_request(struct usb_ep * _ep,gfp_t gfp_flags)619 mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
620 {
621 	struct mv_req *req = NULL;
622 
623 	req = kzalloc(sizeof *req, gfp_flags);
624 	if (!req)
625 		return NULL;
626 
627 	req->req.dma = DMA_ADDR_INVALID;
628 	INIT_LIST_HEAD(&req->queue);
629 
630 	return &req->req;
631 }
632 
mv_free_request(struct usb_ep * _ep,struct usb_request * _req)633 static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
634 {
635 	struct mv_req *req = NULL;
636 
637 	req = container_of(_req, struct mv_req, req);
638 
639 	if (_req)
640 		kfree(req);
641 }
642 
/*
 * mv_ep_fifo_flush() - flush an endpoint's FIFO (usb_ep_ops.fifo_flush).
 *
 * Writes the endpoint's bit(s) to ENDPTFLUSH and waits until both the
 * flush register clears and ENDPTSTATUS shows the endpoint idle,
 * retrying because the controller may re-prime during the flush.
 * For ep0 both directions (bits 0 and 16) are flushed at once.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 flushes both directions at once */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
702 
703 /* queues (submits) an I/O request to an endpoint */
704 static int
mv_ep_queue(struct usb_ep * _ep,struct usb_request * _req,gfp_t gfp_flags)705 mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
706 {
707 	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
708 	struct mv_req *req = container_of(_req, struct mv_req, req);
709 	struct mv_udc *udc = ep->udc;
710 	unsigned long flags;
711 
712 	/* catch various bogus parameters */
713 	if (!_req || !req->req.complete || !req->req.buf
714 			|| !list_empty(&req->queue)) {
715 		dev_err(&udc->dev->dev, "%s, bad params", __func__);
716 		return -EINVAL;
717 	}
718 	if (unlikely(!_ep || !ep->desc)) {
719 		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
720 		return -EINVAL;
721 	}
722 	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
723 		if (req->req.length > ep->ep.maxpacket)
724 			return -EMSGSIZE;
725 	}
726 
727 	udc = ep->udc;
728 	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
729 		return -ESHUTDOWN;
730 
731 	req->ep = ep;
732 
733 	/* map virtual address to hardware */
734 	if (req->req.dma == DMA_ADDR_INVALID) {
735 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
736 					req->req.buf,
737 					req->req.length, ep_dir(ep)
738 						? DMA_TO_DEVICE
739 						: DMA_FROM_DEVICE);
740 		req->mapped = 1;
741 	} else {
742 		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
743 					req->req.dma, req->req.length,
744 					ep_dir(ep)
745 						? DMA_TO_DEVICE
746 						: DMA_FROM_DEVICE);
747 		req->mapped = 0;
748 	}
749 
750 	req->req.status = -EINPROGRESS;
751 	req->req.actual = 0;
752 	req->dtd_count = 0;
753 
754 	spin_lock_irqsave(&udc->lock, flags);
755 
756 	/* build dtds and push them to device queue */
757 	if (!req_to_dtd(req)) {
758 		int retval;
759 		retval = queue_dtd(ep, req);
760 		if (retval) {
761 			spin_unlock_irqrestore(&udc->lock, flags);
762 			return retval;
763 		}
764 	} else {
765 		spin_unlock_irqrestore(&udc->lock, flags);
766 		return -ENOMEM;
767 	}
768 
769 	/* Update ep0 state */
770 	if (ep->ep_num == 0)
771 		udc->ep0_state = DATA_STATE_XMIT;
772 
773 	/* irq handler advances the queue */
774 	list_add_tail(&req->queue, &ep->queue);
775 	spin_unlock_irqrestore(&udc->lock, flags);
776 
777 	return 0;
778 }
779 
/*
 * mv_prime_ep() - point the endpoint's queue head at a request's first
 * dTD and prime the endpoint.  Used by dequeue to restart transfers
 * after the current one was flushed.
 */
static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occure before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}
800 
/*
 * mv_ep_dequeue() - cancel (unlink) a queued request
 * (usb_ep_ops.dequeue).
 *
 * Temporarily disables the endpoint, removes the request from the queue
 * -- either by flushing and re-priming from the next request (when the
 * request is at the head) or by patching the preceding dTD chain -- and
 * retires it with -ECONNRESET, then re-enables the endpoint.
 */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			/* queue becomes empty: terminate the QH */
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/* link the previous request's tail past the removed chain */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
883 
/*
 * ep_set_stall() - set or clear the stall bit for one direction of an
 * endpoint.  Clearing a stall also resets the data toggle, as required
 * before the endpoint can resume transfers.
 */
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 ctrl = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_IN) {
		if (stall)
			ctrl |= EPCTRL_TX_EP_STALL;
		else
			ctrl = (ctrl & ~EPCTRL_TX_EP_STALL)
				| EPCTRL_TX_DATA_TOGGLE_RST;
	} else {
		if (stall)
			ctrl |= EPCTRL_RX_EP_STALL;
		else
			ctrl = (ctrl & ~EPCTRL_RX_EP_STALL)
				| EPCTRL_RX_DATA_TOGGLE_RST;
	}

	writel(ctrl, &udc->op_regs->epctrlx[ep_num]);
}
906 
/*
 * ep_is_stall() - report whether one direction of an endpoint is
 * currently stalled.  Returns 1 if stalled, 0 otherwise.
 */
static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 ctrl = readl(&udc->op_regs->epctrlx[ep_num]);
	u32 stall_bit = (direction == EP_DIR_OUT)
			? EPCTRL_RX_EP_STALL : EPCTRL_TX_EP_STALL;

	return (ctrl & stall_bit) ? 1 : 0;
}
918 
mv_ep_set_halt_wedge(struct usb_ep * _ep,int halt,int wedge)919 static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
920 {
921 	struct mv_ep *ep;
922 	unsigned long flags = 0;
923 	int status = 0;
924 	struct mv_udc *udc;
925 
926 	ep = container_of(_ep, struct mv_ep, ep);
927 	udc = ep->udc;
928 	if (!_ep || !ep->desc) {
929 		status = -EINVAL;
930 		goto out;
931 	}
932 
933 	if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
934 		status = -EOPNOTSUPP;
935 		goto out;
936 	}
937 
938 	/*
939 	 * Attempt to halt IN ep will fail if any transfer requests
940 	 * are still queue
941 	 */
942 	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
943 		status = -EAGAIN;
944 		goto out;
945 	}
946 
947 	spin_lock_irqsave(&ep->udc->lock, flags);
948 	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
949 	if (halt && wedge)
950 		ep->wedge = 1;
951 	else if (!halt)
952 		ep->wedge = 0;
953 	spin_unlock_irqrestore(&ep->udc->lock, flags);
954 
955 	if (ep->ep_num == 0) {
956 		udc->ep0_state = WAIT_FOR_SETUP;
957 		udc->ep0_dir = EP_DIR_OUT;
958 	}
959 out:
960 	return status;
961 }
962 
/* usb_ep_ops.set_halt: halt or unhalt without wedging the endpoint */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, /* wedge */ 0);
}
967 
/* usb_ep_ops.set_wedge: halt the endpoint and wedge it */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, /* halt */ 1, /* wedge */ 1);
}
972 
/* endpoint operations handed to the gadget core for every endpoint */
static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
987 
udc_clock_enable(struct mv_udc * udc)988 static void udc_clock_enable(struct mv_udc *udc)
989 {
990 	unsigned int i;
991 
992 	for (i = 0; i < udc->clknum; i++)
993 		clk_enable(udc->clk[i]);
994 }
995 
udc_clock_disable(struct mv_udc * udc)996 static void udc_clock_disable(struct mv_udc *udc)
997 {
998 	unsigned int i;
999 
1000 	for (i = 0; i < udc->clknum; i++)
1001 		clk_disable(udc->clk[i]);
1002 }
1003 
/*
 * udc_stop() - quiesce the controller: mask interrupts and clear the
 * Run/Stop bit so the device detaches from the bus.
 */
static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Reset the Run the bit in the command register to stop VUSB */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}
1021 
/*
 * udc_start() - unmask the interrupts this driver handles and set the
 * Run/Stop bit so the controller attaches to the bus.
 */
static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
1037 
udc_reset(struct mv_udc * udc)1038 static int udc_reset(struct mv_udc *udc)
1039 {
1040 	unsigned int loops;
1041 	u32 tmp, portsc;
1042 
1043 	/* Stop the controller */
1044 	tmp = readl(&udc->op_regs->usbcmd);
1045 	tmp &= ~USBCMD_RUN_STOP;
1046 	writel(tmp, &udc->op_regs->usbcmd);
1047 
1048 	/* Reset the controller to get default values */
1049 	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
1050 
1051 	/* wait for reset to complete */
1052 	loops = LOOPS(RESET_TIMEOUT);
1053 	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
1054 		if (loops == 0) {
1055 			dev_err(&udc->dev->dev,
1056 				"Wait for RESET completed TIMEOUT\n");
1057 			return -ETIMEDOUT;
1058 		}
1059 		loops--;
1060 		udelay(LOOPS_USEC);
1061 	}
1062 
1063 	/* set controller to device mode */
1064 	tmp = readl(&udc->op_regs->usbmode);
1065 	tmp |= USBMODE_CTRL_MODE_DEVICE;
1066 
1067 	/* turn setup lockout off, require setup tripwire in usbcmd */
1068 	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;
1069 
1070 	writel(tmp, &udc->op_regs->usbmode);
1071 
1072 	writel(0x0, &udc->op_regs->epsetupstat);
1073 
1074 	/* Configure the Endpoint List Address */
1075 	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
1076 		&udc->op_regs->eplistaddr);
1077 
1078 	portsc = readl(&udc->op_regs->portsc[0]);
1079 	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
1080 		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
1081 
1082 	if (udc->force_fs)
1083 		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
1084 	else
1085 		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
1086 
1087 	writel(portsc, &udc->op_regs->portsc[0]);
1088 
1089 	tmp = readl(&udc->op_regs->epctrlx[0]);
1090 	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
1091 	writel(tmp, &udc->op_regs->epctrlx[0]);
1092 
1093 	return 0;
1094 }
1095 
mv_udc_enable_internal(struct mv_udc * udc)1096 static int mv_udc_enable_internal(struct mv_udc *udc)
1097 {
1098 	int retval;
1099 
1100 	if (udc->active)
1101 		return 0;
1102 
1103 	dev_dbg(&udc->dev->dev, "enable udc\n");
1104 	udc_clock_enable(udc);
1105 	if (udc->pdata->phy_init) {
1106 		retval = udc->pdata->phy_init(udc->phy_regs);
1107 		if (retval) {
1108 			dev_err(&udc->dev->dev,
1109 				"init phy error %d\n", retval);
1110 			udc_clock_disable(udc);
1111 			return retval;
1112 		}
1113 	}
1114 	udc->active = 1;
1115 
1116 	return 0;
1117 }
1118 
mv_udc_enable(struct mv_udc * udc)1119 static int mv_udc_enable(struct mv_udc *udc)
1120 {
1121 	if (udc->clock_gating)
1122 		return mv_udc_enable_internal(udc);
1123 
1124 	return 0;
1125 }
1126 
mv_udc_disable_internal(struct mv_udc * udc)1127 static void mv_udc_disable_internal(struct mv_udc *udc)
1128 {
1129 	if (udc->active) {
1130 		dev_dbg(&udc->dev->dev, "disable udc\n");
1131 		if (udc->pdata->phy_deinit)
1132 			udc->pdata->phy_deinit(udc->phy_regs);
1133 		udc_clock_disable(udc);
1134 		udc->active = 0;
1135 	}
1136 }
1137 
mv_udc_disable(struct mv_udc * udc)1138 static void mv_udc_disable(struct mv_udc *udc)
1139 {
1140 	if (udc->clock_gating)
1141 		mv_udc_disable_internal(udc);
1142 }
1143 
mv_udc_get_frame(struct usb_gadget * gadget)1144 static int mv_udc_get_frame(struct usb_gadget *gadget)
1145 {
1146 	struct mv_udc *udc;
1147 	u16	retval;
1148 
1149 	if (!gadget)
1150 		return -ENODEV;
1151 
1152 	udc = container_of(gadget, struct mv_udc, gadget);
1153 
1154 	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1155 
1156 	return retval;
1157 }
1158 
1159 /* Tries to wake up the host connected to this gadget */
mv_udc_wakeup(struct usb_gadget * gadget)1160 static int mv_udc_wakeup(struct usb_gadget *gadget)
1161 {
1162 	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1163 	u32 portsc;
1164 
1165 	/* Remote wakeup feature not enabled by host */
1166 	if (!udc->remote_wakeup)
1167 		return -ENOTSUPP;
1168 
1169 	portsc = readl(&udc->op_regs->portsc);
1170 	/* not suspended? */
1171 	if (!(portsc & PORTSCX_PORT_SUSPEND))
1172 		return 0;
1173 	/* trigger force resume */
1174 	portsc |= PORTSCX_PORT_FORCE_RESUME;
1175 	writel(portsc, &udc->op_regs->portsc[0]);
1176 	return 0;
1177 }
1178 
/*
 * usb_gadget_ops.vbus_session: called when VBUS appears or disappears.
 * With a bound driver, softconnect on and VBUS present, the controller
 * is (re)enabled and restarted; when VBUS just dropped while
 * softconnect is still on, all activity is stopped and the controller
 * disabled again.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1211 
/*
 * usb_gadget_ops.pullup: software connect/disconnect of the D+ pullup.
 * Mirrors mv_udc_vbus_session(): start the controller when driver,
 * softconnect and VBUS are all present; stop it when softconnect is
 * dropped while VBUS remains.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1244 
/* forward declarations; the definitions follow later in this file */
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.start		= mv_udc_start,
	.stop		= mv_udc_stop,
};
1265 
eps_init(struct mv_udc * udc)1266 static int eps_init(struct mv_udc *udc)
1267 {
1268 	struct mv_ep	*ep;
1269 	char name[14];
1270 	int i;
1271 
1272 	/* initialize ep0 */
1273 	ep = &udc->eps[0];
1274 	ep->udc = udc;
1275 	strncpy(ep->name, "ep0", sizeof(ep->name));
1276 	ep->ep.name = ep->name;
1277 	ep->ep.ops = &mv_ep_ops;
1278 	ep->wedge = 0;
1279 	ep->stopped = 0;
1280 	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
1281 	ep->ep_num = 0;
1282 	ep->desc = &mv_ep0_desc;
1283 	INIT_LIST_HEAD(&ep->queue);
1284 
1285 	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1286 
1287 	/* initialize other endpoints */
1288 	for (i = 2; i < udc->max_eps * 2; i++) {
1289 		ep = &udc->eps[i];
1290 		if (i % 2) {
1291 			snprintf(name, sizeof(name), "ep%din", i / 2);
1292 			ep->direction = EP_DIR_IN;
1293 		} else {
1294 			snprintf(name, sizeof(name), "ep%dout", i / 2);
1295 			ep->direction = EP_DIR_OUT;
1296 		}
1297 		ep->udc = udc;
1298 		strncpy(ep->name, name, sizeof(ep->name));
1299 		ep->ep.name = ep->name;
1300 
1301 		ep->ep.ops = &mv_ep_ops;
1302 		ep->stopped = 0;
1303 		ep->ep.maxpacket = (unsigned short) ~0;
1304 		ep->ep_num = i / 2;
1305 
1306 		INIT_LIST_HEAD(&ep->queue);
1307 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1308 
1309 		ep->dqh = &udc->ep_dqh[i];
1310 	}
1311 
1312 	return 0;
1313 }
1314 
1315 /* delete all endpoint requests, called with spinlock held */
nuke(struct mv_ep * ep,int status)1316 static void nuke(struct mv_ep *ep, int status)
1317 {
1318 	/* called with spinlock held */
1319 	ep->stopped = 1;
1320 
1321 	/* endpoint fifo flush */
1322 	mv_ep_fifo_flush(&ep->ep);
1323 
1324 	while (!list_empty(&ep->queue)) {
1325 		struct mv_req *req = NULL;
1326 		req = list_entry(ep->queue.next, struct mv_req, queue);
1327 		done(ep, req, status);
1328 	}
1329 }
1330 
1331 /* stop all USB activities */
/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	/* ep0 is not on the gadget list, flush it explicitly first */
	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* caller holds udc->lock; drop it around the callback */
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
1349 
mv_udc_start(struct usb_gadget_driver * driver,int (* bind)(struct usb_gadget *))1350 static int mv_udc_start(struct usb_gadget_driver *driver,
1351 		int (*bind)(struct usb_gadget *))
1352 {
1353 	struct mv_udc *udc = the_controller;
1354 	int retval = 0;
1355 	unsigned long flags;
1356 
1357 	if (!udc)
1358 		return -ENODEV;
1359 
1360 	if (udc->driver)
1361 		return -EBUSY;
1362 
1363 	spin_lock_irqsave(&udc->lock, flags);
1364 
1365 	/* hook up the driver ... */
1366 	driver->driver.bus = NULL;
1367 	udc->driver = driver;
1368 	udc->gadget.dev.driver = &driver->driver;
1369 
1370 	udc->usb_state = USB_STATE_ATTACHED;
1371 	udc->ep0_state = WAIT_FOR_SETUP;
1372 	udc->ep0_dir = EP_DIR_OUT;
1373 
1374 	spin_unlock_irqrestore(&udc->lock, flags);
1375 
1376 	retval = bind(&udc->gadget);
1377 	if (retval) {
1378 		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
1379 				driver->driver.name, retval);
1380 		udc->driver = NULL;
1381 		udc->gadget.dev.driver = NULL;
1382 		return retval;
1383 	}
1384 
1385 	if (udc->transceiver) {
1386 		retval = otg_set_peripheral(udc->transceiver->otg,
1387 					&udc->gadget);
1388 		if (retval) {
1389 			dev_err(&udc->dev->dev,
1390 				"unable to register peripheral to otg\n");
1391 			if (driver->unbind) {
1392 				driver->unbind(&udc->gadget);
1393 				udc->gadget.dev.driver = NULL;
1394 				udc->driver = NULL;
1395 			}
1396 			return retval;
1397 		}
1398 	}
1399 
1400 	/* pullup is always on */
1401 	mv_udc_pullup(&udc->gadget, 1);
1402 
1403 	/* When boot with cable attached, there will be no vbus irq occurred */
1404 	if (udc->qwork)
1405 		queue_work(udc->qwork, &udc->vbus_work);
1406 
1407 	return 0;
1408 }
1409 
/*
 * usb_gadget_ops.stop: quiesce the hardware, retire all queued
 * transfers and unbind the gadget driver.
 *
 * NOTE(review): mv_udc_enable()/mv_udc_disable() run here with the
 * udc->lock spinlock held, and they reach clock/PHY code -- verify
 * those paths cannot sleep.
 */
static int mv_udc_stop(struct usb_gadget_driver *driver)
{
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	if (!udc)
		return -ENODEV;

	spin_lock_irqsave(&udc->lock, flags);

	/* make sure the controller is clocked before touching registers */
	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	return 0;
}
1437 
/* Select a USB test mode by OR-ing @mode into the PTC field
 * (bits 19:16) of PORTSC1. */
static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 val;

	val = readl(&udc->op_regs->portsc[0]) | (mode << 16);
	writel(val, &udc->op_regs->portsc[0]);
}
1446 
/*
 * Completion callback installed on the ep0 status request when a
 * SET_FEATURE(TEST_MODE) is pending: the test mode is only entered
 * once the status stage has actually completed on the bus.
 */
static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_udc *udc = the_controller;
	struct mv_req *req = container_of(_req, struct mv_req, req);
	unsigned long flags;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		/* consume the pending mode so it fires only once */
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}
1462 
1463 static int
udc_prime_status(struct mv_udc * udc,u8 direction,u16 status,bool empty)1464 udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
1465 {
1466 	int retval = 0;
1467 	struct mv_req *req;
1468 	struct mv_ep *ep;
1469 
1470 	ep = &udc->eps[0];
1471 	udc->ep0_dir = direction;
1472 	udc->ep0_state = WAIT_FOR_OUT_STATUS;
1473 
1474 	req = udc->status_req;
1475 
1476 	/* fill in the reqest structure */
1477 	if (empty == false) {
1478 		*((u16 *) req->req.buf) = cpu_to_le16(status);
1479 		req->req.length = 2;
1480 	} else
1481 		req->req.length = 0;
1482 
1483 	req->ep = ep;
1484 	req->req.status = -EINPROGRESS;
1485 	req->req.actual = 0;
1486 	if (udc->test_mode) {
1487 		req->req.complete = prime_status_complete;
1488 		req->test_mode = udc->test_mode;
1489 		udc->test_mode = 0;
1490 	} else
1491 		req->req.complete = NULL;
1492 	req->dtd_count = 0;
1493 
1494 	if (req->req.dma == DMA_ADDR_INVALID) {
1495 		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1496 				req->req.buf, req->req.length,
1497 				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1498 		req->mapped = 1;
1499 	}
1500 
1501 	/* prime the data phase */
1502 	if (!req_to_dtd(req))
1503 		retval = queue_dtd(ep, req);
1504 	else{	/* no mem */
1505 		retval = -ENOMEM;
1506 		goto out;
1507 	}
1508 
1509 	if (retval) {
1510 		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
1511 		goto out;
1512 	}
1513 
1514 	list_add_tail(&req->queue, &ep->queue);
1515 
1516 	return 0;
1517 out:
1518 	return retval;
1519 }
1520 
/* Handle SET_FEATURE(TEST_MODE): record the requested mode and ack
 * with an empty IN status stage; the mode itself is entered from
 * prime_status_complete() once the status stage is done. */
static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index > TEST_FORCE_EN) {
		dev_err(&udc->dev->dev,
			"This test mode(%d) is not supported\n", index);
		return;
	}

	udc->test_mode = index;
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
1531 
/* Handle SET_ADDRESS: latch the new address (written to the hardware
 * only when the status stage completes, see ep0_req_complete()) and
 * ack with an empty IN status stage. */
static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}
1542 
/*
 * Handle GET_STATUS for device, interface and endpoint recipients and
 * return the 16-bit status word in the data stage.  The @ep_num
 * argument (the endpoint the SETUP arrived on) is currently unused.
 */
static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		/* renamed locals: the originals shadowed the ep_num
		 * parameter (-Wshadow) */
		u8 tgt_ep, tgt_dir;

		tgt_ep = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		tgt_dir = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, tgt_ep, tgt_dir)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}
1577 
ch9clearfeature(struct mv_udc * udc,struct usb_ctrlrequest * setup)1578 static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1579 {
1580 	u8 ep_num;
1581 	u8 direction;
1582 	struct mv_ep *ep;
1583 
1584 	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1585 		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1586 		switch (setup->wValue) {
1587 		case USB_DEVICE_REMOTE_WAKEUP:
1588 			udc->remote_wakeup = 0;
1589 			break;
1590 		default:
1591 			goto out;
1592 		}
1593 	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1594 		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1595 		switch (setup->wValue) {
1596 		case USB_ENDPOINT_HALT:
1597 			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1598 			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1599 				? EP_DIR_IN : EP_DIR_OUT;
1600 			if (setup->wValue != 0 || setup->wLength != 0
1601 				|| ep_num > udc->max_eps)
1602 				goto out;
1603 			ep = &udc->eps[ep_num * 2 + direction];
1604 			if (ep->wedge == 1)
1605 				break;
1606 			spin_unlock(&udc->lock);
1607 			ep_set_stall(udc, ep_num, direction, 0);
1608 			spin_lock(&udc->lock);
1609 			break;
1610 		default:
1611 			goto out;
1612 		}
1613 	} else
1614 		goto out;
1615 
1616 	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1617 		ep0_stall(udc);
1618 out:
1619 	return;
1620 }
1621 
ch9setfeature(struct mv_udc * udc,struct usb_ctrlrequest * setup)1622 static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1623 {
1624 	u8 ep_num;
1625 	u8 direction;
1626 
1627 	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1628 		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
1629 		switch (setup->wValue) {
1630 		case USB_DEVICE_REMOTE_WAKEUP:
1631 			udc->remote_wakeup = 1;
1632 			break;
1633 		case USB_DEVICE_TEST_MODE:
1634 			if (setup->wIndex & 0xFF
1635 				||  udc->gadget.speed != USB_SPEED_HIGH)
1636 				ep0_stall(udc);
1637 
1638 			if (udc->usb_state != USB_STATE_CONFIGURED
1639 				&& udc->usb_state != USB_STATE_ADDRESS
1640 				&& udc->usb_state != USB_STATE_DEFAULT)
1641 				ep0_stall(udc);
1642 
1643 			mv_udc_testmode(udc, (setup->wIndex >> 8));
1644 			goto out;
1645 		default:
1646 			goto out;
1647 		}
1648 	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
1649 		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
1650 		switch (setup->wValue) {
1651 		case USB_ENDPOINT_HALT:
1652 			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1653 			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1654 				? EP_DIR_IN : EP_DIR_OUT;
1655 			if (setup->wValue != 0 || setup->wLength != 0
1656 				|| ep_num > udc->max_eps)
1657 				goto out;
1658 			spin_unlock(&udc->lock);
1659 			ep_set_stall(udc, ep_num, direction, 1);
1660 			spin_lock(&udc->lock);
1661 			break;
1662 		default:
1663 			goto out;
1664 		}
1665 	} else
1666 		goto out;
1667 
1668 	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1669 		ep0_stall(udc);
1670 out:
1671 	return;
1672 }
1673 
/*
 * Dispatch a SETUP packet received on @ep_num.  GET_STATUS,
 * SET_ADDRESS, CLEAR_FEATURE and SET_FEATURE are handled locally;
 * everything else is delegated to the gadget driver's ->setup()
 * callback with udc->lock dropped around the call.  Called with
 * udc->lock held; @setup points at udc->local_setup_buff.
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	bool delegate = false;

	/* a new SETUP voids anything still queued on this control ep */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1735 
1736 /* complete DATA or STATUS phase of ep0 prime status phase if needed */
/* complete DATA or STATUS phase of ep0 prime status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	/* SET_ADDRESS takes effect only once its status stage is done */
	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	/* advance the ep0 state machine */
	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}
1773 
/*
 * Copy the 8-byte SETUP packet out of the endpoint's dQH setup buffer
 * using the setup-tripwire protocol: the copy is retried until the
 * tripwire bit survives, proving the buffer was not overwritten by a
 * new SETUP packet mid-copy.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1798 
/*
 * Transaction-complete interrupt: first drain all pending SETUP
 * packets, then walk the ENDPTCOMPLETE bits and retire finished
 * requests on each endpoint.  Called with udc->lock held from
 * mv_udc_irq().
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	/* ack the completions we are about to handle */
	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		/* ENDPTCOMPLETE: RX bits in 15:0, TX bits in 31:16 */
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* index 1 is the IN half of ep0, which shares eps[0] */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1871 
irq_process_reset(struct mv_udc * udc)1872 void irq_process_reset(struct mv_udc *udc)
1873 {
1874 	u32 tmp;
1875 	unsigned int loops;
1876 
1877 	udc->ep0_dir = EP_DIR_OUT;
1878 	udc->ep0_state = WAIT_FOR_SETUP;
1879 	udc->remote_wakeup = 0;		/* default to 0 on reset */
1880 
1881 	/* The address bits are past bit 25-31. Set the address */
1882 	tmp = readl(&udc->op_regs->deviceaddr);
1883 	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1884 	writel(tmp, &udc->op_regs->deviceaddr);
1885 
1886 	/* Clear all the setup token semaphores */
1887 	tmp = readl(&udc->op_regs->epsetupstat);
1888 	writel(tmp, &udc->op_regs->epsetupstat);
1889 
1890 	/* Clear all the endpoint complete status bits */
1891 	tmp = readl(&udc->op_regs->epcomplete);
1892 	writel(tmp, &udc->op_regs->epcomplete);
1893 
1894 	/* wait until all endptprime bits cleared */
1895 	loops = LOOPS(PRIME_TIMEOUT);
1896 	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1897 		if (loops == 0) {
1898 			dev_err(&udc->dev->dev,
1899 				"Timeout for ENDPTPRIME = 0x%x\n",
1900 				readl(&udc->op_regs->epprime));
1901 			break;
1902 		}
1903 		loops--;
1904 		udelay(LOOPS_USEC);
1905 	}
1906 
1907 	/* Write 1s to the Flush register */
1908 	writel((u32)~0, &udc->op_regs->epflush);
1909 
1910 	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1911 		dev_info(&udc->dev->dev, "usb bus reset\n");
1912 		udc->usb_state = USB_STATE_DEFAULT;
1913 		/* reset all the queues, stop all USB activities */
1914 		stop_activity(udc, udc->driver);
1915 	} else {
1916 		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1917 			readl(&udc->op_regs->portsc));
1918 
1919 		/*
1920 		 * re-initialize
1921 		 * controller reset
1922 		 */
1923 		udc_reset(udc);
1924 
1925 		/* reset all the queues, stop all USB activities */
1926 		stop_activity(udc, udc->driver);
1927 
1928 		/* reset ep0 dQH and endptctrl */
1929 		ep0_reset(udc);
1930 
1931 		/* enable interrupt and set controller to run state */
1932 		udc_start(udc);
1933 
1934 		udc->usb_state = USB_STATE_ATTACHED;
1935 	}
1936 }
1937 
handle_bus_resume(struct mv_udc * udc)1938 static void handle_bus_resume(struct mv_udc *udc)
1939 {
1940 	udc->usb_state = udc->resume_state;
1941 	udc->resume_state = 0;
1942 
1943 	/* report resume to the driver */
1944 	if (udc->driver) {
1945 		if (udc->driver->resume) {
1946 			spin_unlock(&udc->lock);
1947 			udc->driver->resume(&udc->gadget);
1948 			spin_lock(&udc->lock);
1949 		}
1950 	}
1951 }
1952 
irq_process_suspend(struct mv_udc * udc)1953 static void irq_process_suspend(struct mv_udc *udc)
1954 {
1955 	udc->resume_state = udc->usb_state;
1956 	udc->usb_state = USB_STATE_SUSPENDED;
1957 
1958 	if (udc->driver->suspend) {
1959 		spin_unlock(&udc->lock);
1960 		udc->driver->suspend(&udc->gadget);
1961 		spin_lock(&udc->lock);
1962 	}
1963 }
1964 
irq_process_port_change(struct mv_udc * udc)1965 static void irq_process_port_change(struct mv_udc *udc)
1966 {
1967 	u32 portsc;
1968 
1969 	portsc = readl(&udc->op_regs->portsc[0]);
1970 	if (!(portsc & PORTSCX_PORT_RESET)) {
1971 		/* Get the speed */
1972 		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1973 		switch (speed) {
1974 		case PORTSCX_PORT_SPEED_HIGH:
1975 			udc->gadget.speed = USB_SPEED_HIGH;
1976 			break;
1977 		case PORTSCX_PORT_SPEED_FULL:
1978 			udc->gadget.speed = USB_SPEED_FULL;
1979 			break;
1980 		case PORTSCX_PORT_SPEED_LOW:
1981 			udc->gadget.speed = USB_SPEED_LOW;
1982 			break;
1983 		default:
1984 			udc->gadget.speed = USB_SPEED_UNKNOWN;
1985 			break;
1986 		}
1987 	}
1988 
1989 	if (portsc & PORTSCX_PORT_SUSPEND) {
1990 		udc->resume_state = udc->usb_state;
1991 		udc->usb_state = USB_STATE_SUSPENDED;
1992 		if (udc->driver->suspend) {
1993 			spin_unlock(&udc->lock);
1994 			udc->driver->suspend(&udc->gadget);
1995 			spin_lock(&udc->lock);
1996 		}
1997 	}
1998 
1999 	if (!(portsc & PORTSCX_PORT_SUSPEND)
2000 		&& udc->usb_state == USB_STATE_SUSPENDED) {
2001 		handle_bus_resume(udc);
2002 	}
2003 
2004 	if (!udc->resume_state)
2005 		udc->usb_state = USB_STATE_DEFAULT;
2006 }
2007 
/* Error interrupt: just bump the driver's error counter. */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
2013 
/* Top-level interrupt handler: ack the asserted status bits and fan
 * out to the per-event handlers.  Returns IRQ_NONE for interrupts that
 * are not ours (controller stopped, or no enabled status bit set). */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	/* only consider status bits whose interrupt is enabled */
	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2056 
mv_udc_vbus_irq(int irq,void * dev)2057 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2058 {
2059 	struct mv_udc *udc = (struct mv_udc *)dev;
2060 
2061 	/* polling VBUS and init phy may cause too much time*/
2062 	if (udc->qwork)
2063 		queue_work(udc->qwork, &udc->vbus_work);
2064 
2065 	return IRQ_HANDLED;
2066 }
2067 
mv_udc_vbus_work(struct work_struct * work)2068 static void mv_udc_vbus_work(struct work_struct *work)
2069 {
2070 	struct mv_udc *udc;
2071 	unsigned int vbus;
2072 
2073 	udc = container_of(work, struct mv_udc, vbus_work);
2074 	if (!udc->pdata->vbus)
2075 		return;
2076 
2077 	vbus = udc->pdata->vbus->poll();
2078 	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2079 
2080 	if (vbus == VBUS_HIGH)
2081 		mv_udc_vbus_session(&udc->gadget, 1);
2082 	else if (vbus == VBUS_LOW)
2083 		mv_udc_vbus_session(&udc->gadget, 0);
2084 }
2085 
2086 /* release device structure */
/* release device structure */
static void gadget_release(struct device *_dev)
{
	struct mv_udc *udc = the_controller;

	/* let mv_udc_remove() proceed with freeing the udc structure */
	complete(udc->done);
}
2093 
mv_udc_remove(struct platform_device * dev)2094 static int __devexit mv_udc_remove(struct platform_device *dev)
2095 {
2096 	struct mv_udc *udc = the_controller;
2097 	int clk_i;
2098 
2099 	usb_del_gadget_udc(&udc->gadget);
2100 
2101 	if (udc->qwork) {
2102 		flush_workqueue(udc->qwork);
2103 		destroy_workqueue(udc->qwork);
2104 	}
2105 
2106 	/*
2107 	 * If we have transceiver inited,
2108 	 * then vbus irq will not be requested in udc driver.
2109 	 */
2110 	if (udc->pdata && udc->pdata->vbus
2111 		&& udc->clock_gating && udc->transceiver == NULL)
2112 		free_irq(udc->pdata->vbus->irq, &dev->dev);
2113 
2114 	/* free memory allocated in probe */
2115 	if (udc->dtd_pool)
2116 		dma_pool_destroy(udc->dtd_pool);
2117 
2118 	if (udc->ep_dqh)
2119 		dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2120 			udc->ep_dqh, udc->ep_dqh_dma);
2121 
2122 	kfree(udc->eps);
2123 
2124 	if (udc->irq)
2125 		free_irq(udc->irq, &dev->dev);
2126 
2127 	mv_udc_disable(udc);
2128 
2129 	if (udc->cap_regs)
2130 		iounmap(udc->cap_regs);
2131 
2132 	if (udc->phy_regs)
2133 		iounmap(udc->phy_regs);
2134 
2135 	if (udc->status_req) {
2136 		kfree(udc->status_req->req.buf);
2137 		kfree(udc->status_req);
2138 	}
2139 
2140 	for (clk_i = 0; clk_i <= udc->clknum; clk_i++)
2141 		clk_put(udc->clk[clk_i]);
2142 
2143 	device_unregister(&udc->gadget.dev);
2144 
2145 	/* free dev, wait for the release() finished */
2146 	wait_for_completion(udc->done);
2147 	kfree(udc);
2148 
2149 	the_controller = NULL;
2150 
2151 	return 0;
2152 }
2153 
mv_udc_probe(struct platform_device * dev)2154 static int __devinit mv_udc_probe(struct platform_device *dev)
2155 {
2156 	struct mv_usb_platform_data *pdata = dev->dev.platform_data;
2157 	struct mv_udc *udc;
2158 	int retval = 0;
2159 	int clk_i = 0;
2160 	struct resource *r;
2161 	size_t size;
2162 
2163 	if (pdata == NULL) {
2164 		dev_err(&dev->dev, "missing platform_data\n");
2165 		return -ENODEV;
2166 	}
2167 
2168 	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
2169 	udc = kzalloc(size, GFP_KERNEL);
2170 	if (udc == NULL) {
2171 		dev_err(&dev->dev, "failed to allocate memory for udc\n");
2172 		return -ENOMEM;
2173 	}
2174 
2175 	the_controller = udc;
2176 	udc->done = &release_done;
2177 	udc->pdata = dev->dev.platform_data;
2178 	spin_lock_init(&udc->lock);
2179 
2180 	udc->dev = dev;
2181 
2182 #ifdef CONFIG_USB_OTG_UTILS
2183 	if (pdata->mode == MV_USB_MODE_OTG)
2184 		udc->transceiver = usb_get_transceiver();
2185 #endif
2186 
2187 	udc->clknum = pdata->clknum;
2188 	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
2189 		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
2190 		if (IS_ERR(udc->clk[clk_i])) {
2191 			retval = PTR_ERR(udc->clk[clk_i]);
2192 			goto err_put_clk;
2193 		}
2194 	}
2195 
2196 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2197 	if (r == NULL) {
2198 		dev_err(&dev->dev, "no I/O memory resource defined\n");
2199 		retval = -ENODEV;
2200 		goto err_put_clk;
2201 	}
2202 
2203 	udc->cap_regs = (struct mv_cap_regs __iomem *)
2204 		ioremap(r->start, resource_size(r));
2205 	if (udc->cap_regs == NULL) {
2206 		dev_err(&dev->dev, "failed to map I/O memory\n");
2207 		retval = -EBUSY;
2208 		goto err_put_clk;
2209 	}
2210 
2211 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2212 	if (r == NULL) {
2213 		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
2214 		retval = -ENODEV;
2215 		goto err_iounmap_capreg;
2216 	}
2217 
2218 	udc->phy_regs = ioremap(r->start, resource_size(r));
2219 	if (udc->phy_regs == NULL) {
2220 		dev_err(&dev->dev, "failed to map phy I/O memory\n");
2221 		retval = -EBUSY;
2222 		goto err_iounmap_capreg;
2223 	}
2224 
2225 	/* we will acces controller register, so enable the clk */
2226 	retval = mv_udc_enable_internal(udc);
2227 	if (retval)
2228 		goto err_iounmap_phyreg;
2229 
2230 	udc->op_regs =
2231 		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2232 		+ (readl(&udc->cap_regs->caplength_hciversion)
2233 			& CAPLENGTH_MASK));
2234 	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2235 
2236 	/*
2237 	 * some platform will use usb to download image, it may not disconnect
2238 	 * usb gadget before loading kernel. So first stop udc here.
2239 	 */
2240 	udc_stop(udc);
2241 	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2242 
2243 	size = udc->max_eps * sizeof(struct mv_dqh) *2;
2244 	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2245 	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
2246 					&udc->ep_dqh_dma, GFP_KERNEL);
2247 
2248 	if (udc->ep_dqh == NULL) {
2249 		dev_err(&dev->dev, "allocate dQH memory failed\n");
2250 		retval = -ENOMEM;
2251 		goto err_disable_clock;
2252 	}
2253 	udc->ep_dqh_size = size;
2254 
2255 	/* create dTD dma_pool resource */
2256 	udc->dtd_pool = dma_pool_create("mv_dtd",
2257 			&dev->dev,
2258 			sizeof(struct mv_dtd),
2259 			DTD_ALIGNMENT,
2260 			DMA_BOUNDARY);
2261 
2262 	if (!udc->dtd_pool) {
2263 		retval = -ENOMEM;
2264 		goto err_free_dma;
2265 	}
2266 
2267 	size = udc->max_eps * sizeof(struct mv_ep) *2;
2268 	udc->eps = kzalloc(size, GFP_KERNEL);
2269 	if (udc->eps == NULL) {
2270 		dev_err(&dev->dev, "allocate ep memory failed\n");
2271 		retval = -ENOMEM;
2272 		goto err_destroy_dma;
2273 	}
2274 
2275 	/* initialize ep0 status request structure */
2276 	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
2277 	if (!udc->status_req) {
2278 		dev_err(&dev->dev, "allocate status_req memory failed\n");
2279 		retval = -ENOMEM;
2280 		goto err_free_eps;
2281 	}
2282 	INIT_LIST_HEAD(&udc->status_req->queue);
2283 
2284 	/* allocate a small amount of memory to get valid address */
2285 	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
2286 	udc->status_req->req.dma = DMA_ADDR_INVALID;
2287 
2288 	udc->resume_state = USB_STATE_NOTATTACHED;
2289 	udc->usb_state = USB_STATE_POWERED;
2290 	udc->ep0_dir = EP_DIR_OUT;
2291 	udc->remote_wakeup = 0;
2292 
2293 	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2294 	if (r == NULL) {
2295 		dev_err(&dev->dev, "no IRQ resource defined\n");
2296 		retval = -ENODEV;
2297 		goto err_free_status_req;
2298 	}
2299 	udc->irq = r->start;
2300 	if (request_irq(udc->irq, mv_udc_irq,
2301 		IRQF_SHARED, driver_name, udc)) {
2302 		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
2303 			udc->irq);
2304 		retval = -ENODEV;
2305 		goto err_free_status_req;
2306 	}
2307 
2308 	/* initialize gadget structure */
2309 	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2310 	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2311 	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2312 	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2313 	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
2314 
2315 	/* the "gadget" abstracts/virtualizes the controller */
2316 	dev_set_name(&udc->gadget.dev, "gadget");
2317 	udc->gadget.dev.parent = &dev->dev;
2318 	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
2319 	udc->gadget.dev.release = gadget_release;
2320 	udc->gadget.name = driver_name;		/* gadget name */
2321 
2322 	retval = device_register(&udc->gadget.dev);
2323 	if (retval)
2324 		goto err_free_irq;
2325 
2326 	eps_init(udc);
2327 
2328 	/* VBUS detect: we can disable/enable clock on demand.*/
2329 	if (udc->transceiver)
2330 		udc->clock_gating = 1;
2331 	else if (pdata->vbus) {
2332 		udc->clock_gating = 1;
2333 		retval = request_threaded_irq(pdata->vbus->irq, NULL,
2334 				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2335 		if (retval) {
2336 			dev_info(&dev->dev,
2337 				"Can not request irq for VBUS, "
2338 				"disable clock gating\n");
2339 			udc->clock_gating = 0;
2340 		}
2341 
2342 		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2343 		if (!udc->qwork) {
2344 			dev_err(&dev->dev, "cannot create workqueue\n");
2345 			retval = -ENOMEM;
2346 			goto err_unregister;
2347 		}
2348 
2349 		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2350 	}
2351 
2352 	/*
2353 	 * When clock gating is supported, we can disable clk and phy.
2354 	 * If not, it means that VBUS detection is not supported, we
2355 	 * have to enable vbus active all the time to let controller work.
2356 	 */
2357 	if (udc->clock_gating)
2358 		mv_udc_disable_internal(udc);
2359 	else
2360 		udc->vbus_active = 1;
2361 
2362 	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
2363 	if (retval)
2364 		goto err_unregister;
2365 
2366 	dev_info(&dev->dev, "successful probe UDC device %s clock gating.\n",
2367 		udc->clock_gating ? "with" : "without");
2368 
2369 	return 0;
2370 
2371 err_unregister:
2372 	if (udc->pdata && udc->pdata->vbus
2373 		&& udc->clock_gating && udc->transceiver == NULL)
2374 		free_irq(pdata->vbus->irq, &dev->dev);
2375 	device_unregister(&udc->gadget.dev);
2376 err_free_irq:
2377 	free_irq(udc->irq, &dev->dev);
2378 err_free_status_req:
2379 	kfree(udc->status_req->req.buf);
2380 	kfree(udc->status_req);
2381 err_free_eps:
2382 	kfree(udc->eps);
2383 err_destroy_dma:
2384 	dma_pool_destroy(udc->dtd_pool);
2385 err_free_dma:
2386 	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
2387 			udc->ep_dqh, udc->ep_dqh_dma);
2388 err_disable_clock:
2389 	mv_udc_disable_internal(udc);
2390 err_iounmap_phyreg:
2391 	iounmap(udc->phy_regs);
2392 err_iounmap_capreg:
2393 	iounmap(udc->cap_regs);
2394 err_put_clk:
2395 	for (clk_i--; clk_i >= 0; clk_i--)
2396 		clk_put(udc->clk[clk_i]);
2397 	the_controller = NULL;
2398 	kfree(udc);
2399 	return retval;
2400 }
2401 
2402 #ifdef CONFIG_PM
mv_udc_suspend(struct device * _dev)2403 static int mv_udc_suspend(struct device *_dev)
2404 {
2405 	struct mv_udc *udc = the_controller;
2406 
2407 	/* if OTG is enabled, the following will be done in OTG driver*/
2408 	if (udc->transceiver)
2409 		return 0;
2410 
2411 	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2412 		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2413 			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2414 			return -EAGAIN;
2415 		}
2416 
2417 	/*
2418 	 * only cable is unplugged, udc can suspend.
2419 	 * So do not care about clock_gating == 1.
2420 	 */
2421 	if (!udc->clock_gating) {
2422 		udc_stop(udc);
2423 
2424 		spin_lock_irq(&udc->lock);
2425 		/* stop all usb activities */
2426 		stop_activity(udc, udc->driver);
2427 		spin_unlock_irq(&udc->lock);
2428 
2429 		mv_udc_disable_internal(udc);
2430 	}
2431 
2432 	return 0;
2433 }
2434 
mv_udc_resume(struct device * _dev)2435 static int mv_udc_resume(struct device *_dev)
2436 {
2437 	struct mv_udc *udc = the_controller;
2438 	int retval;
2439 
2440 	/* if OTG is enabled, the following will be done in OTG driver*/
2441 	if (udc->transceiver)
2442 		return 0;
2443 
2444 	if (!udc->clock_gating) {
2445 		retval = mv_udc_enable_internal(udc);
2446 		if (retval)
2447 			return retval;
2448 
2449 		if (udc->driver && udc->softconnect) {
2450 			udc_reset(udc);
2451 			ep0_reset(udc);
2452 			udc_start(udc);
2453 		}
2454 	}
2455 
2456 	return 0;
2457 }
2458 
/* System-sleep PM callbacks; compiled in only when CONFIG_PM is set. */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif
2464 
mv_udc_shutdown(struct platform_device * dev)2465 static void mv_udc_shutdown(struct platform_device *dev)
2466 {
2467 	struct mv_udc *udc = the_controller;
2468 	u32 mode;
2469 
2470 	/* reset controller mode to IDLE */
2471 	mode = readl(&udc->op_regs->usbmode);
2472 	mode &= ~3;
2473 	writel(mode, &udc->op_regs->usbmode);
2474 }
2475 
2476 static struct platform_driver udc_driver = {
2477 	.probe		= mv_udc_probe,
2478 	.remove		= __exit_p(mv_udc_remove),
2479 	.shutdown	= mv_udc_shutdown,
2480 	.driver		= {
2481 		.owner	= THIS_MODULE,
2482 		.name	= "mv-udc",
2483 #ifdef CONFIG_PM
2484 		.pm	= &mv_udc_pm_ops,
2485 #endif
2486 	},
2487 };
2488 
/* Standard platform-driver registration boilerplate and module metadata. */
module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
2495