1 /*
2  * BRIEF MODULE DESCRIPTION
3  *	Au1000 USB Device-Side (device layer)
4  *
5  * Copyright 2001-2002 MontaVista Software Inc.
6  * Author: MontaVista Software, Inc.
7  *		stevel@mvista.com or source@mvista.com
8  *
9  *  This program is free software; you can redistribute	 it and/or modify it
10  *  under  the terms of	 the GNU General  Public License as published by the
11  *  Free Software Foundation;  either version 2 of the	License, or (at your
12  *  option) any later version.
13  *
14  *  THIS  SOFTWARE  IS PROVIDED	  ``AS	IS'' AND   ANY	EXPRESS OR IMPLIED
15  *  WARRANTIES,	  INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
16  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
17  *  NO	EVENT  SHALL   THE AUTHOR  BE	 LIABLE FOR ANY	  DIRECT, INDIRECT,
18  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  *  NOT LIMITED	  TO, PROCUREMENT OF  SUBSTITUTE GOODS	OR SERVICES; LOSS OF
20  *  USE, DATA,	OR PROFITS; OR	BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21  *  ANY THEORY OF LIABILITY, WHETHER IN	 CONTRACT, STRICT LIABILITY, OR TORT
22  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  *
25  *  You should have received a copy of the  GNU General Public License along
26  *  with this program; if not, write  to the Free Software Foundation, Inc.,
27  *  675 Mass Ave, Cambridge, MA 02139, USA.
28  */
29 
30 #include <linux/config.h>
31 #include <linux/kernel.h>
32 #include <linux/ioport.h>
33 #include <linux/sched.h>
34 #include <linux/signal.h>
35 #include <linux/errno.h>
36 #include <linux/poll.h>
37 #include <linux/init.h>
38 #include <linux/slab.h>
39 #include <linux/fcntl.h>
40 #include <linux/module.h>
41 #include <linux/spinlock.h>
42 #include <linux/list.h>
43 #include <linux/smp_lock.h>
44 #define DEBUG
45 #include <linux/usb.h>
46 
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49 #include <asm/irq.h>
50 #include <asm/mipsregs.h>
51 #include <asm/au1000.h>
52 #include <asm/au1000_dma.h>
53 #include <asm/au1000_usbdev.h>
54 
/*
 * Verbose debug logging: vdbg() only emits output when both DEBUG and
 * VDEBUG are defined.  VDEBUG is unconditionally #undef'd just below,
 * so vdbg() currently compiles away to a no-op in every configuration.
 */
#ifdef DEBUG
#undef VDEBUG
#ifdef VDEBUG
#define vdbg(fmt, arg...) printk(KERN_DEBUG __FILE__ ": " fmt "\n" , ## arg)
#else
#define vdbg(fmt, arg...) do {} while (0)
#endif
#else
#define vdbg(fmt, arg...) do {} while (0)
#endif

/* NOTE(review): classic double-evaluation hazard if passed side effects */
#define MAX(a,b)	(((a)>(b))?(a):(b))

/* kmalloc flags: must not sleep when allocating from interrupt context */
#define ALLOC_FLAGS (in_interrupt () ? GFP_ATOMIC : GFP_KERNEL)

/* Endpoint hardware FIFO depth (entries; one byte moved per access) */
#define EP_FIFO_DEPTH 8
71 
/* Stages of a control transfer on the default endpoint (EP0) */
typedef enum {
	SETUP_STAGE = 0,
	DATA_STAGE,
	STATUS_STAGE
} ep0_stage_t;

/*
 * Per-endpoint register addresses.  A value of -1 marks a register the
 * endpoint does not have (IN endpoints lack the read side, OUT
 * endpoints lack the write side -- see the ep_reg[] table).
 */
typedef struct {
	int read_fifo;
	int write_fifo;
	int ctrl_stat;
	int read_fifo_status;
	int write_fifo_status;
} endpoint_reg_t;

/* Singly-linked list of packets with a cached element count */
typedef struct {
	usbdev_pkt_t *head;
	usbdev_pkt_t *tail;
	int count;
} pkt_list_t;

/* Driver-side state for one hardware endpoint */
typedef struct {
	int active;		// non-zero once the ep is set up and in use
	struct usb_endpoint_descriptor *desc;
	endpoint_reg_t *reg;
	/* Only one of these is used, unless this is the control ep */
	pkt_list_t inlist;	// packets queued for transmission to host
	pkt_list_t outlist;	// packets received (or being received)
	unsigned int indma, outdma; /* DMA channel numbers for IN, OUT */
	/* following are extracted from endpoint descriptor for easy access */
	int max_pkt_size;
	int type;
	int direction;
	/* WE assign endpoint addresses! */
	int address;
	spinlock_t lock;	// protects the packet lists and h/w access
} endpoint_t;
108 
109 
/*
 * Global driver state: the six endpoints, the EP0 control-transfer
 * stage machine, our descriptors, and the callback into the function
 * (upper) layer.
 */
static struct usb_dev {
	endpoint_t ep[6];
	ep0_stage_t ep0_stage;	// current stage of the EP0 state machine

	struct usb_device_descriptor *   dev_desc;
	struct usb_interface_descriptor* if_desc;
	struct usb_config_descriptor *   conf_desc;
	u8 *                             full_conf_desc;	// full GET_DESCRIPTOR(CONFIG) reply
	struct usb_string_descriptor *   str_desc[6];

	/* callback to function layer */
	void (*func_cb)(usbdev_cb_type_t type, unsigned long arg,
			void *cb_data);
	void* cb_data;		// opaque cookie handed back to func_cb

	usbdev_state_t state;	// device state
	int suspended;		// suspended flag
	int address;		// device address
	int interface;		// current interface (only 0 is valid here)
	int num_ep;
	u8 alternate_setting;	// current alt setting (only 0 is valid here)
	u8 configuration;	// configuration value
	int remote_wakeup_en;	// DEVICE_REMOTE_WAKEUP feature state
} usbdev;
134 
135 
/*
 * Register addresses for each endpoint, indexed by endpoint number.
 * -1 marks a register the endpoint does not have (see endpoint_reg_t).
 * Entry 1 is a placeholder: FIFO 1 belongs to EP0 (write side).
 */
static endpoint_reg_t ep_reg[] = {
	// FIFO's 0 and 1 are EP0 default control
	{USBD_EP0RD, USBD_EP0WR, USBD_EP0CS, USBD_EP0RDSTAT, USBD_EP0WRSTAT },
	{0},
	// FIFO 2 is EP2, IN
	{ -1, USBD_EP2WR, USBD_EP2CS, -1, USBD_EP2WRSTAT },
	// FIFO 3 is EP3, IN
	{    -1,     USBD_EP3WR, USBD_EP3CS,     -1,         USBD_EP3WRSTAT },
	// FIFO 4 is EP4, OUT
	{USBD_EP4RD,     -1,     USBD_EP4CS, USBD_EP4RDSTAT,     -1         },
	// FIFO 5 is EP5, OUT
	{USBD_EP5RD,     -1,     USBD_EP5CS, USBD_EP5RDSTAT,     -1         }
};

/* DMA device ids and printable names for each endpoint's channel */
static struct {
	unsigned int id;
	const char *str;
} ep_dma_id[] = {
	{ DMA_ID_USBDEV_EP0_TX, "USBDev EP0 IN" },
	{ DMA_ID_USBDEV_EP0_RX, "USBDev EP0 OUT" },
	{ DMA_ID_USBDEV_EP2_TX, "USBDev EP2 IN" },
	{ DMA_ID_USBDEV_EP3_TX, "USBDev EP3 IN" },
	{ DMA_ID_USBDEV_EP4_RX, "USBDev EP4 OUT" },
	{ DMA_ID_USBDEV_EP5_RX, "USBDev EP5 OUT" }
};

/* direction flags; NOTE(review): bit 3, not USB's bit 7 -- presumably a
 * h/w configuration field, confirm at the (unseen) use sites */
#define DIR_OUT 0
#define DIR_IN  (1<<3)

/* shorthand for the ch9 transfer-type codes */
#define CONTROL_EP USB_ENDPOINT_XFER_CONTROL
#define BULK_EP    USB_ENDPOINT_XFER_BULK
167 
168 static inline endpoint_t *
epaddr_to_ep(struct usb_dev * dev,int ep_addr)169 epaddr_to_ep(struct usb_dev* dev, int ep_addr)
170 {
171 	if (ep_addr >= 0 && ep_addr < 2)
172 		return &dev->ep[0];
173 	if (ep_addr < 6)
174 		return &dev->ep[ep_addr];
175 	return NULL;
176 }
177 
/* Printable names for the standard bRequest codes, indexed by code (0-12) */
static const char* std_req_name[] = {
	"GET_STATUS",
	"CLEAR_FEATURE",
	"RESERVED",
	"SET_FEATURE",
	"RESERVED",
	"SET_ADDRESS",
	"GET_DESCRIPTOR",
	"SET_DESCRIPTOR",
	"GET_CONFIGURATION",
	"SET_CONFIGURATION",
	"GET_INTERFACE",
	"SET_INTERFACE",
	"SYNCH_FRAME"
};
193 
194 static inline const char*
get_std_req_name(int req)195 get_std_req_name(int req)
196 {
197 	return (req >= 0 && req <= 12) ? std_req_name[req] : "UNKNOWN";
198 }
199 
#if 0
/*
 * Debug helper: pretty-print a SETUP packet.  Disabled.
 * NOTE(review): uses pre-usb_ctrlrequest field names (requesttype/
 * request/index/length); would need bRequestType/bRequest/wIndex/
 * wLength to compile if ever re-enabled.
 */
static void
dump_setup(struct usb_ctrlrequest* s)
{
	dbg(__FUNCTION__ ": requesttype=%d", s->requesttype);
	dbg(__FUNCTION__ ": request=%d %s", s->request,
	    get_std_req_name(s->request));
	dbg(__FUNCTION__ ": value=0x%04x", s->wValue);
	dbg(__FUNCTION__ ": index=%d", s->index);
	dbg(__FUNCTION__ ": length=%d", s->length);
}
#endif
212 
213 static inline usbdev_pkt_t *
alloc_packet(endpoint_t * ep,int data_size,void * data)214 alloc_packet(endpoint_t * ep, int data_size, void* data)
215 {
216 	usbdev_pkt_t* pkt =
217 		(usbdev_pkt_t *)kmalloc(sizeof(usbdev_pkt_t) + data_size,
218 					ALLOC_FLAGS);
219 	if (!pkt)
220 		return NULL;
221 	pkt->ep_addr = ep->address;
222 	pkt->size = data_size;
223 	pkt->status = 0;
224 	pkt->next = NULL;
225 	if (data)
226 		memcpy(pkt->payload, data, data_size);
227 
228 	return pkt;
229 }
230 
231 
232 /*
233  * Link a packet to the tail of the enpoint's packet list.
234  * EP spinlock must be held when calling.
235  */
236 static void
link_tail(endpoint_t * ep,pkt_list_t * list,usbdev_pkt_t * pkt)237 link_tail(endpoint_t * ep, pkt_list_t * list, usbdev_pkt_t * pkt)
238 {
239 	if (!list->tail) {
240 		list->head = list->tail = pkt;
241 		list->count = 1;
242 	} else {
243 		list->tail->next = pkt;
244 		list->tail = pkt;
245 		list->count++;
246 	}
247 }
248 
249 /*
250  * Unlink and return a packet from the head of the given packet
251  * list. It is the responsibility of the caller to free the packet.
252  * EP spinlock must be held when calling.
253  */
254 static usbdev_pkt_t *
unlink_head(pkt_list_t * list)255 unlink_head(pkt_list_t * list)
256 {
257 	usbdev_pkt_t *pkt;
258 
259 	pkt = list->head;
260 	if (!pkt || !list->count) {
261 		return NULL;
262 	}
263 
264 	list->head = pkt->next;
265 	if (!list->head) {
266 		list->head = list->tail = NULL;
267 		list->count = 0;
268 	} else
269 		list->count--;
270 
271 	return pkt;
272 }
273 
274 /*
275  * Create and attach a new packet to the tail of the enpoint's
276  * packet list. EP spinlock must be held when calling.
277  */
278 static usbdev_pkt_t *
add_packet(endpoint_t * ep,pkt_list_t * list,int size)279 add_packet(endpoint_t * ep, pkt_list_t * list, int size)
280 {
281 	usbdev_pkt_t *pkt = alloc_packet(ep, size, NULL);
282 	if (!pkt)
283 		return NULL;
284 
285 	link_tail(ep, list, pkt);
286 	return pkt;
287 }
288 
289 
290 /*
291  * Unlink and free a packet from the head of the enpoint's
292  * packet list. EP spinlock must be held when calling.
293  */
294 static inline void
free_packet(pkt_list_t * list)295 free_packet(pkt_list_t * list)
296 {
297 	kfree(unlink_head(list));
298 }
299 
300 /* EP spinlock must be held when calling. */
301 static inline void
flush_pkt_list(pkt_list_t * list)302 flush_pkt_list(pkt_list_t * list)
303 {
304 	while (list->count)
305 		free_packet(list);
306 }
307 
308 /* EP spinlock must be held when calling */
309 static inline void
flush_write_fifo(endpoint_t * ep)310 flush_write_fifo(endpoint_t * ep)
311 {
312 	if (ep->reg->write_fifo_status >= 0) {
313 		au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
314 			  USBDEV_FSTAT_OF,
315 			  ep->reg->write_fifo_status);
316 		//udelay(100);
317 		//au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
318 		//	  ep->reg->write_fifo_status);
319 	}
320 }
321 
322 /* EP spinlock must be held when calling */
323 static inline void
flush_read_fifo(endpoint_t * ep)324 flush_read_fifo(endpoint_t * ep)
325 {
326 	if (ep->reg->read_fifo_status >= 0) {
327 		au_writel(USBDEV_FSTAT_FLUSH | USBDEV_FSTAT_UF |
328 			  USBDEV_FSTAT_OF,
329 			  ep->reg->read_fifo_status);
330 		//udelay(100);
331 		//au_writel(USBDEV_FSTAT_UF | USBDEV_FSTAT_OF,
332 		//	  ep->reg->read_fifo_status);
333 	}
334 }
335 
336 
337 /* EP spinlock must be held when calling. */
338 static void
endpoint_flush(endpoint_t * ep)339 endpoint_flush(endpoint_t * ep)
340 {
341 	// First, flush all packets
342 	flush_pkt_list(&ep->inlist);
343 	flush_pkt_list(&ep->outlist);
344 
345 	// Now flush the endpoint's h/w FIFO(s)
346 	flush_write_fifo(ep);
347 	flush_read_fifo(ep);
348 }
349 
350 /* EP spinlock must be held when calling. */
351 static void
endpoint_stall(endpoint_t * ep)352 endpoint_stall(endpoint_t * ep)
353 {
354 	u32 cs;
355 
356 	warn(__FUNCTION__);
357 
358 	cs = au_readl(ep->reg->ctrl_stat) | USBDEV_CS_STALL;
359 	au_writel(cs, ep->reg->ctrl_stat);
360 }
361 
362 /* EP spinlock must be held when calling. */
363 static void
endpoint_unstall(endpoint_t * ep)364 endpoint_unstall(endpoint_t * ep)
365 {
366 	u32 cs;
367 
368 	warn(__FUNCTION__);
369 
370 	cs = au_readl(ep->reg->ctrl_stat) & ~USBDEV_CS_STALL;
371 	au_writel(cs, ep->reg->ctrl_stat);
372 }
373 
/*
 * Reset the endpoint's DATA0/DATA1 toggle (wanted after
 * CLEAR_FEATURE(ENDPOINT_HALT)).  Intentionally empty: no way to do
 * this on the Au1000 device controller has been identified.
 */
static void
endpoint_reset_datatoggle(endpoint_t * ep)
{
	// FIXME: is this possible?
}
379 
380 
381 /* EP spinlock must be held when calling. */
382 static int
endpoint_fifo_read(endpoint_t * ep)383 endpoint_fifo_read(endpoint_t * ep)
384 {
385 	int read_count = 0;
386 	u8 *bufptr;
387 	usbdev_pkt_t *pkt = ep->outlist.tail;
388 
389 	if (!pkt)
390 		return -EINVAL;
391 
392 	bufptr = &pkt->payload[pkt->size];
393 	while (au_readl(ep->reg->read_fifo_status) & USBDEV_FSTAT_FCNT_MASK) {
394 		*bufptr++ = au_readl(ep->reg->read_fifo) & 0xff;
395 		read_count++;
396 		pkt->size++;
397 	}
398 
399 	return read_count;
400 }
401 
#if 0
/*
 * PIO transmit path (currently unused -- transmission goes through
 * DMA in kickstart_send_packet).  Copies payload bytes starting at
 * offset `index` into the write FIFO until the FIFO fills or the
 * packet is exhausted.  Returns the number of bytes written, or
 * -EINVAL when no packet is queued.
 * EP spinlock must be held when calling.
 */
static int
endpoint_fifo_write(endpoint_t * ep, int index)
{
	int write_count = 0;
	u8 *bufptr;
	usbdev_pkt_t *pkt = ep->inlist.head;

	if (!pkt)
		return -EINVAL;

	bufptr = &pkt->payload[index];
	while ((au_readl(ep->reg->write_fifo_status) &
		USBDEV_FSTAT_FCNT_MASK) < EP_FIFO_DEPTH) {
		if (bufptr < pkt->payload + pkt->size) {
			au_writel(*bufptr++, ep->reg->write_fifo);
			write_count++;
		} else {
			break;
		}
	}

	return write_count;
}
#endif
428 
/*
 * This routine is called to restart transmission of a packet.
 * The endpoint's TSIZE must be set to the new packet's size,
 * and DMA to the write FIFO needs to be restarted.
 * EP spinlock must be held when calling.
 */
static void
kickstart_send_packet(endpoint_t * ep)
{
	u32 cs;
	usbdev_pkt_t *pkt = ep->inlist.head;

	vdbg(__FUNCTION__ ": ep%d, pkt=%p", ep->address, pkt);

	if (!pkt) {
		err(__FUNCTION__ ": head=NULL! list->count=%d",
		    ep->inlist.count);
		return;
	}

	/* write the payload back to RAM so the DMA engine sees it */
	dma_cache_wback_inv((unsigned long)pkt->payload, pkt->size);

	/*
	 * make sure FIFO is empty
	 */
	flush_write_fifo(ep);

	/* preserve only the STALL bit, program TSIZE with the pkt length */
	cs = au_readl(ep->reg->ctrl_stat) & USBDEV_CS_STALL;
	cs |= (pkt->size << USBDEV_CS_TSIZE_BIT);
	au_writel(cs, ep->reg->ctrl_stat);

	/* hang the payload on whichever ping-pong DMA buffer is current */
	if (get_dma_active_buffer(ep->indma) == 1) {
		set_dma_count1(ep->indma, pkt->size);
		set_dma_addr1(ep->indma, virt_to_phys(pkt->payload));
		enable_dma_buffer1(ep->indma);	// reenable
	} else {
		set_dma_count0(ep->indma, pkt->size);
		set_dma_addr0(ep->indma, virt_to_phys(pkt->payload));
		enable_dma_buffer0(ep->indma);	// reenable
	}
	if (dma_halted(ep->indma))
		start_dma(ep->indma);
}
472 
473 
/*
 * This routine is called when a packet in the inlist has been
 * completed. Frees the completed packet and starts sending the
 * next. EP spinlock must be held when calling.
 *
 * Returns the completed packet -- already unlinked, so the caller
 * owns it and must kfree() it -- or NULL if the inlist was empty.
 */
static usbdev_pkt_t *
send_packet_complete(endpoint_t * ep)
{
	usbdev_pkt_t *pkt = unlink_head(&ep->inlist);

	if (pkt) {
		/* record whether the host ACK'd or NAK'd this transfer */
		pkt->status =
			(au_readl(ep->reg->ctrl_stat) & USBDEV_CS_NAK) ?
			PKT_STATUS_NAK : PKT_STATUS_ACK;

		vdbg(__FUNCTION__ ": ep%d, %s pkt=%p, list count=%d",
		     ep->address, (pkt->status & PKT_STATUS_NAK) ?
		     "NAK" : "ACK", pkt, ep->inlist.count);
	}

	/*
	 * The write fifo should already be drained if things are
	 * working right, but flush it anyway just in case.
	 */
	flush_write_fifo(ep);

	// begin transmitting next packet in the inlist
	if (ep->inlist.count) {
		kickstart_send_packet(ep);
	}

	return pkt;
}
507 
/*
 * Add a new packet to the tail of the given ep's packet
 * inlist. The transmit complete interrupt frees packets from
 * the head of this list. EP spinlock must be held when calling.
 *
 * Takes ownership of pkt.  A synchronous send (async == 0) first
 * halts DMA and drops any packets still waiting in the inlist.
 * Returns the number of bytes queued, or -EINVAL for a bad packet
 * or endpoint address.
 * NOTE(review): a zero-size pkt returns 0 without being queued or
 * freed -- looks like a small leak; confirm against callers.
 */
static int
send_packet(struct usb_dev* dev, usbdev_pkt_t *pkt, int async)
{
	pkt_list_t *list;
	endpoint_t* ep;

	if (!pkt || !(ep = epaddr_to_ep(dev, pkt->ep_addr)))
		return -EINVAL;

	if (!pkt->size)
		return 0;

	list = &ep->inlist;

	if (!async && list->count) {
		halt_dma(ep->indma);
		flush_pkt_list(list);
	}

	link_tail(ep, list, pkt);

	vdbg(__FUNCTION__ ": ep%d, pkt=%p, size=%d, list count=%d",
	     ep->address, pkt, pkt->size, list->count);

	if (list->count == 1) {
		/*
		 * if the packet count is one, it means the list was empty,
		 * and no more data will go out this ep until we kick-start
		 * it again.
		 */
		kickstart_send_packet(ep);
	}

	return pkt->size;
}
548 
/*
 * This routine is called to restart reception of a packet.
 * Allocates a fresh max-size packet at the tail of the outlist and
 * points the OUT DMA channel's current ping-pong buffer at its
 * payload.  EP spinlock must be held when calling.
 */
static void
kickstart_receive_packet(endpoint_t * ep)
{
	usbdev_pkt_t *pkt;

	// get and link a new packet for next reception
	if (!(pkt = add_packet(ep, &ep->outlist, ep->max_pkt_size))) {
		err(__FUNCTION__ ": could not alloc new packet");
		return;
	}

	/* arm whichever ping-pong buffer is current; zero the other count */
	if (get_dma_active_buffer(ep->outdma) == 1) {
		clear_dma_done1(ep->outdma);
		set_dma_count1(ep->outdma, ep->max_pkt_size);
		set_dma_count0(ep->outdma, 0);
		set_dma_addr1(ep->outdma, virt_to_phys(pkt->payload));
		enable_dma_buffer1(ep->outdma);	// reenable
	} else {
		clear_dma_done0(ep->outdma);
		set_dma_count0(ep->outdma, ep->max_pkt_size);
		set_dma_count1(ep->outdma, 0);
		set_dma_addr0(ep->outdma, virt_to_phys(pkt->payload));
		enable_dma_buffer0(ep->outdma);	// reenable
	}
	if (dma_halted(ep->outdma))
		start_dma(ep->outdma);
}
580 
581 
/*
 * This routine is called when a packet in the outlist has been
 * completed (received) and we need to prepare for a new packet
 * to be received. Halts DMA and computes the packet size from the
 * remaining DMA counter. Then prepares a new packet for reception
 * and restarts DMA. FIXME: what if another packet comes in
 * on top of the completed packet? Counter would be wrong.
 * EP spinlock must be held when calling.
 *
 * Returns the completed packet, still linked on the outlist (the EP0
 * caller unlinks it immediately), or NULL if none was queued.
 */
static usbdev_pkt_t *
receive_packet_complete(endpoint_t * ep)
{
	usbdev_pkt_t *pkt = ep->outlist.tail;
	u32 cs;

	halt_dma(ep->outdma);

	cs = au_readl(ep->reg->ctrl_stat);

	if (!pkt)
		return NULL;

	/* bytes received = buffer size minus what DMA did not consume */
	pkt->size = ep->max_pkt_size - get_dma_residue(ep->outdma);
	if (pkt->size)
		dma_cache_inv((unsigned long)pkt->payload, pkt->size);
	/*
	 * need to pull out any remaining bytes in the FIFO.
	 */
	endpoint_fifo_read(ep);
	/*
	 * should be drained now, but flush anyway just in case.
	 */
	flush_read_fifo(ep);

	/* record ACK/NAK; flag h/w-detected SETUP packets on EP0 */
	pkt->status = (cs & USBDEV_CS_NAK) ? PKT_STATUS_NAK : PKT_STATUS_ACK;
	if (ep->address == 0 && (cs & USBDEV_CS_SU))
		pkt->status |= PKT_STATUS_SU;

	vdbg(__FUNCTION__ ": ep%d, %s pkt=%p, size=%d",
	     ep->address, (pkt->status & PKT_STATUS_NAK) ?
	     "NAK" : "ACK", pkt, pkt->size);

	kickstart_receive_packet(ep);

	return pkt;
}
628 
629 
630 /*
631  ****************************************************************************
632  * Here starts the standard device request handlers. They are
633  * all called by do_setup() via a table of function pointers.
634  ****************************************************************************
635  */
636 
637 static ep0_stage_t
do_get_status(struct usb_dev * dev,struct usb_ctrlrequest * setup)638 do_get_status(struct usb_dev* dev, struct usb_ctrlrequest* setup)
639 {
640 	switch (setup->bRequestType) {
641 	case 0x80:	// Device
642 		// FIXME: send device status
643 		break;
644 	case 0x81:	// Interface
645 		// FIXME: send interface status
646 		break;
647 	case 0x82:	// End Point
648 		// FIXME: send endpoint status
649 		break;
650 	default:
651 		// Invalid Command
652 		endpoint_stall(&dev->ep[0]); // Stall End Point 0
653 		break;
654 	}
655 
656 	return STATUS_STAGE;
657 }
658 
659 static ep0_stage_t
do_clear_feature(struct usb_dev * dev,struct usb_ctrlrequest * setup)660 do_clear_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
661 {
662 	switch (setup->bRequestType) {
663 	case 0x00:	// Device
664 		if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
665 			dev->remote_wakeup_en = 0;
666 	else
667 			endpoint_stall(&dev->ep[0]);
668 		break;
669 	case 0x02:	// End Point
670 		if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
671 			endpoint_t *ep =
672 				epaddr_to_ep(dev,
673 					     le16_to_cpu(setup->wIndex) & 0xff);
674 
675 			endpoint_unstall(ep);
676 			endpoint_reset_datatoggle(ep);
677 		} else
678 			endpoint_stall(&dev->ep[0]);
679 		break;
680 	}
681 
682 	return SETUP_STAGE;
683 }
684 
685 static ep0_stage_t
do_reserved(struct usb_dev * dev,struct usb_ctrlrequest * setup)686 do_reserved(struct usb_dev* dev, struct usb_ctrlrequest* setup)
687 {
688 	// Invalid request, stall End Point 0
689 	endpoint_stall(&dev->ep[0]);
690 	return SETUP_STAGE;
691 }
692 
693 static ep0_stage_t
do_set_feature(struct usb_dev * dev,struct usb_ctrlrequest * setup)694 do_set_feature(struct usb_dev* dev, struct usb_ctrlrequest* setup)
695 {
696 	switch (setup->bRequestType) {
697 	case 0x00:	// Device
698 		if ((le16_to_cpu(setup->wValue) & 0xff) == 1)
699 			dev->remote_wakeup_en = 1;
700 		else
701 			endpoint_stall(&dev->ep[0]);
702 		break;
703 	case 0x02:	// End Point
704 		if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
705 			endpoint_t *ep =
706 				epaddr_to_ep(dev,
707 					     le16_to_cpu(setup->wIndex) & 0xff);
708 
709 			endpoint_stall(ep);
710 		} else
711 			endpoint_stall(&dev->ep[0]);
712 		break;
713 	}
714 
715 	return SETUP_STAGE;
716 }
717 
718 static ep0_stage_t
do_set_address(struct usb_dev * dev,struct usb_ctrlrequest * setup)719 do_set_address(struct usb_dev* dev, struct usb_ctrlrequest* setup)
720 {
721 	int new_state = dev->state;
722 	int new_addr = le16_to_cpu(setup->wValue);
723 
724 	dbg(__FUNCTION__ ": our address=%d", new_addr);
725 
726 	if (new_addr > 127) {
727 			// usb spec doesn't tell us what to do, so just go to
728 			// default state
729 		new_state = DEFAULT;
730 		dev->address = 0;
731 	} else if (dev->address != new_addr) {
732 		dev->address = new_addr;
733 		new_state = ADDRESS;
734 	}
735 
736 	if (dev->state != new_state) {
737 		dev->state = new_state;
738 		/* inform function layer of usbdev state change */
739 		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
740 	}
741 
742 	return SETUP_STAGE;
743 }
744 
745 static ep0_stage_t
do_get_descriptor(struct usb_dev * dev,struct usb_ctrlrequest * setup)746 do_get_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
747 {
748 	int strnum, desc_len = le16_to_cpu(setup->wLength);
749 
750 		switch (le16_to_cpu(setup->wValue) >> 8) {
751 		case USB_DT_DEVICE:
752 			// send device descriptor!
753 		desc_len = desc_len > dev->dev_desc->bLength ?
754 			dev->dev_desc->bLength : desc_len;
755 			dbg("sending device desc, size=%d", desc_len);
756 		send_packet(dev, alloc_packet(&dev->ep[0], desc_len,
757 					      dev->dev_desc), 0);
758 			break;
759 		case USB_DT_CONFIG:
760 			// If the config descr index in low-byte of
761 			// setup->wValue	is valid, send config descr,
762 			// otherwise stall ep0.
763 			if ((le16_to_cpu(setup->wValue) & 0xff) == 0) {
764 				// send config descriptor!
765 				if (desc_len <= USB_DT_CONFIG_SIZE) {
766 					dbg("sending partial config desc, size=%d",
767 					     desc_len);
768 				send_packet(dev,
769 					    alloc_packet(&dev->ep[0],
770 							 desc_len,
771 							 dev->conf_desc),
772 					    0);
773 				} else {
774 				int len = dev->conf_desc->wTotalLength;
775 				dbg("sending whole config desc,"
776 				    " size=%d, our size=%d", desc_len, len);
777 				desc_len = desc_len > len ? len : desc_len;
778 				send_packet(dev,
779 					    alloc_packet(&dev->ep[0],
780 							 desc_len,
781 							 dev->full_conf_desc),
782 					    0);
783 				}
784 			} else
785 			endpoint_stall(&dev->ep[0]);
786 			break;
787 		case USB_DT_STRING:
788 			// If the string descr index in low-byte of setup->wValue
789 			// is valid, send string descr, otherwise stall ep0.
790 			strnum = le16_to_cpu(setup->wValue) & 0xff;
791 			if (strnum >= 0 && strnum < 6) {
792 				struct usb_string_descriptor *desc =
793 				dev->str_desc[strnum];
794 				desc_len = desc_len > desc->bLength ?
795 					desc->bLength : desc_len;
796 				dbg("sending string desc %d", strnum);
797 			send_packet(dev,
798 				    alloc_packet(&dev->ep[0], desc_len,
799 						 desc), 0);
800 			} else
801 			endpoint_stall(&dev->ep[0]);
802 			break;
803 	default:
804 		// Invalid request
805 		err("invalid get desc=%d, stalled",
806 			    le16_to_cpu(setup->wValue) >> 8);
807 		endpoint_stall(&dev->ep[0]);	// Stall endpoint 0
808 			break;
809 		}
810 
811 	return STATUS_STAGE;
812 }
813 
814 static ep0_stage_t
do_set_descriptor(struct usb_dev * dev,struct usb_ctrlrequest * setup)815 do_set_descriptor(struct usb_dev* dev, struct usb_ctrlrequest* setup)
816 {
817 	// TODO: implement
818 	// there will be an OUT data stage (the descriptor to set)
819 	return DATA_STAGE;
820 }
821 
822 static ep0_stage_t
do_get_configuration(struct usb_dev * dev,struct usb_ctrlrequest * setup)823 do_get_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
824 {
825 	// send dev->configuration
826 	dbg("sending config");
827 	send_packet(dev, alloc_packet(&dev->ep[0], 1, &dev->configuration),
828 		    0);
829 	return STATUS_STAGE;
830 }
831 
832 static ep0_stage_t
do_set_configuration(struct usb_dev * dev,struct usb_ctrlrequest * setup)833 do_set_configuration(struct usb_dev* dev, struct usb_ctrlrequest* setup)
834 {
835 	// set active config to low-byte of setup->wValue
836 	dev->configuration = le16_to_cpu(setup->wValue) & 0xff;
837 	dbg("set config, config=%d", dev->configuration);
838 	if (!dev->configuration && dev->state > DEFAULT) {
839 		dev->state = ADDRESS;
840 		/* inform function layer of usbdev state change */
841 		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
842 	} else if (dev->configuration == 1) {
843 		dev->state = CONFIGURED;
844 		/* inform function layer of usbdev state change */
845 		dev->func_cb(CB_NEW_STATE, dev->state, dev->cb_data);
846 	} else {
847 		// FIXME: "respond with request error" - how?
848 	}
849 
850 	return SETUP_STAGE;
851 }
852 
853 static ep0_stage_t
do_get_interface(struct usb_dev * dev,struct usb_ctrlrequest * setup)854 do_get_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
855 {
856 		// interface must be zero.
857 	if ((le16_to_cpu(setup->wIndex) & 0xff) || dev->state == ADDRESS) {
858 			// FIXME: respond with "request error". how?
859 	} else if (dev->state == CONFIGURED) {
860 		// send dev->alternate_setting
861 			dbg("sending alt setting");
862 		send_packet(dev, alloc_packet(&dev->ep[0], 1,
863 					      &dev->alternate_setting), 0);
864 		}
865 
866 	return STATUS_STAGE;
867 
868 }
869 
870 static ep0_stage_t
do_set_interface(struct usb_dev * dev,struct usb_ctrlrequest * setup)871 do_set_interface(struct usb_dev* dev, struct usb_ctrlrequest* setup)
872 {
873 	if (dev->state == ADDRESS) {
874 			// FIXME: respond with "request error". how?
875 	} else if (dev->state == CONFIGURED) {
876 		dev->interface = le16_to_cpu(setup->wIndex) & 0xff;
877 		dev->alternate_setting =
878 			    le16_to_cpu(setup->wValue) & 0xff;
879 			// interface and alternate_setting must be zero
880 		if (dev->interface || dev->alternate_setting) {
881 				// FIXME: respond with "request error". how?
882 			}
883 		}
884 
885 	return SETUP_STAGE;
886 }
887 
888 static ep0_stage_t
do_synch_frame(struct usb_dev * dev,struct usb_ctrlrequest * setup)889 do_synch_frame(struct usb_dev* dev, struct usb_ctrlrequest* setup)
890 {
891 	// TODO
892 	return SETUP_STAGE;
893 }
894 
/* Signature of a standard-request handler: returns the next ep0 stage */
typedef ep0_stage_t (*req_method_t)(struct usb_dev* dev,
				    struct usb_ctrlrequest* setup);


/* Table of the standard device request handlers, indexed by bRequest */
static const req_method_t req_method[] = {
	do_get_status,
	do_clear_feature,
	do_reserved,
	do_set_feature,
	do_reserved,
	do_set_address,
	do_get_descriptor,
	do_set_descriptor,
	do_get_configuration,
	do_set_configuration,
	do_get_interface,
	do_set_interface,
	do_synch_frame
};
915 
916 
917 // SETUP packet request dispatcher
918 static void
do_setup(struct usb_dev * dev,struct usb_ctrlrequest * setup)919 do_setup (struct usb_dev* dev, struct usb_ctrlrequest* setup)
920 {
921 	req_method_t m;
922 
923 	dbg(__FUNCTION__ ": req %d %s", setup->bRequestType,
924 	    get_std_req_name(setup->bRequestType));
925 
926 	if ((setup->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
927 	    (setup->bRequestType & USB_RECIP_MASK) != USB_RECIP_DEVICE) {
928 		err(__FUNCTION__ ": invalid requesttype 0x%02x",
929 		    setup->bRequestType);
930 		return;
931 		}
932 
933 	if ((setup->bRequestType & 0x80) == USB_DIR_OUT && setup->wLength)
934 		dbg(__FUNCTION__ ": OUT phase! length=%d", setup->wLength);
935 
936 	if (setup->bRequestType < sizeof(req_method)/sizeof(req_method_t))
937 		m = req_method[setup->bRequestType];
938 			else
939 		m = do_reserved;
940 
941 	dev->ep0_stage = (*m)(dev, setup);
942 }
943 
/*
 * A SETUP, DATA0, or DATA1 packet has been received
 * on the default control endpoint's fifo.
 *
 * Runs the EP0 stage machine: SETUP packets are dispatched to
 * do_setup(), an OUT DATA stage is acknowledged (and its payload
 * dropped -- see comment below), and a zero-length STATUS ACK resets
 * the machine.  Unlike the data endpoints, EP0 packets are consumed
 * here: unlinked from the outlist and freed at the end.
 */
static void
process_ep0_receive (struct usb_dev* dev)
{
	endpoint_t *ep0 = &dev->ep[0];
	usbdev_pkt_t *pkt;

	spin_lock(&ep0->lock);

		// complete packet and prepare a new packet
	pkt = receive_packet_complete(ep0);
	if (!pkt) {
		// FIXME: should  put a warn/err here.
		spin_unlock(&ep0->lock);
			return;
		}

	// unlink immediately from endpoint.
	unlink_head(&ep0->outlist);

	// override current stage if h/w says it's a setup packet
	if (pkt->status & PKT_STATUS_SU)
		dev->ep0_stage = SETUP_STAGE;

	switch (dev->ep0_stage) {
	case SETUP_STAGE:
		vdbg("SU bit is %s in setup stage",
		     (pkt->status & PKT_STATUS_SU) ? "set" : "not set");

			if (pkt->size == sizeof(struct usb_ctrlrequest)) {
#ifdef VDEBUG
			if (pkt->status & PKT_STATUS_ACK)
				vdbg("received SETUP");
				else
				vdbg("received NAK SETUP");
#endif
			do_setup(dev, (struct usb_ctrlrequest*)pkt->payload);
		} else
			err(__FUNCTION__ ": wrong size SETUP received");
		break;
	case DATA_STAGE:
		/*
		 * this setup has an OUT data stage. Of the standard
		 * device requests, only set_descriptor has this stage,
		 * so this packet is that descriptor. TODO: drop it for
		 * now, set_descriptor not implemented.
		 *
		 * Need to place a byte in the write FIFO here, to prepare
		 * to send a zero-length DATA ack packet to the host in the
		 * STATUS stage.
		 */
		au_writel(0, ep0->reg->write_fifo);
		dbg("received OUT stage DATAx on EP0, size=%d", pkt->size);
		dev->ep0_stage = SETUP_STAGE;
		break;
	case STATUS_STAGE:
		// this setup had an IN data stage, and host is ACK'ing
		// the packet we sent during that stage.
		if (pkt->size != 0)
			warn("received non-zero ACK on EP0??");
#ifdef VDEBUG
		else
			vdbg("received ACK on EP0");
#endif
		dev->ep0_stage = SETUP_STAGE;
		break;
		}

	spin_unlock(&ep0->lock);
		// we're done processing the packet, free it
		kfree(pkt);
}
1019 
1020 
1021 /*
1022  * A DATA0/1 packet has been received on one of the OUT endpoints (4 or 5)
1023  */
1024 static void
process_ep_receive(struct usb_dev * dev,endpoint_t * ep)1025 process_ep_receive (struct usb_dev* dev, endpoint_t *ep)
1026 {
1027 	usbdev_pkt_t *pkt;
1028 
1029 		spin_lock(&ep->lock);
1030 	pkt = receive_packet_complete(ep);
1031 		spin_unlock(&ep->lock);
1032 
1033 	dev->func_cb(CB_PKT_COMPLETE, (unsigned long)pkt, dev->cb_data);
1034 }
1035 
1036 
1037 
1038 /* This ISR handles the receive complete and suspend events */
1039 static void
req_sus_intr(int irq,void * dev_id,struct pt_regs * regs)1040 req_sus_intr (int irq, void *dev_id, struct pt_regs *regs)
1041 {
1042 	struct usb_dev *dev = (struct usb_dev *) dev_id;
1043 	u32 status;
1044 
1045 	status = au_readl(USBD_INTSTAT);
1046 	au_writel(status, USBD_INTSTAT);	// ack'em
1047 
1048 	if (status & (1<<0))
1049 		process_ep0_receive(dev);
1050 	if (status & (1<<4))
1051 		process_ep_receive(dev, &dev->ep[4]);
1052 	if (status & (1<<5))
1053 		process_ep_receive(dev, &dev->ep[5]);
1054 }
1055 
1056 
/* This ISR handles the DMA done events on EP0 */
static void
dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct usb_dev *dev = (struct usb_dev *) dev_id;
	usbdev_pkt_t* pkt;
	endpoint_t *ep0 = &dev->ep[0];
	u32 cs0, buff_done;	/* NOTE(review): cs0 is read but never used */

	spin_lock(&ep0->lock);
	cs0 = au_readl(ep0->reg->ctrl_stat);

	// first check packet transmit done
	if ((buff_done = get_dma_buffer_done(ep0->indma)) != 0) {
		// transmitted a DATAx packet during DATA stage
		// on control endpoint 0
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->indma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->indma);

		/* unlink the finished packet (next one is kick-started
		 * inside send_packet_complete) and free it */
		pkt = send_packet_complete(ep0);
		if (pkt)
			kfree(pkt);
	}

	/*
	 * Now check packet receive done. Shouldn't get these,
	 * the receive packet complete intr should happen
	 * before the DMA done intr occurs.
	 */
	if ((buff_done = get_dma_buffer_done(ep0->outdma)) != 0) {
		// clear DMA done bit
		if (buff_done & DMA_D0)
			clear_dma_done0(ep0->outdma);
		if (buff_done & DMA_D1)
			clear_dma_done1(ep0->outdma);

		//process_ep0_receive(dev);
	}

	spin_unlock(&ep0->lock);
}
1101 
1102 /* This ISR handles the DMA done events on endpoints 2,3,4,5 */
1103 static void
dma_done_ep_intr(int irq,void * dev_id,struct pt_regs * regs)1104 dma_done_ep_intr(int irq, void *dev_id, struct pt_regs *regs)
1105 {
1106 	struct usb_dev *dev = (struct usb_dev *) dev_id;
1107 	int i;
1108 
1109 	for (i = 2; i < 6; i++) {
1110 	u32 buff_done;
1111 		usbdev_pkt_t* pkt;
1112 		endpoint_t *ep = &dev->ep[i];
1113 
1114 		if (!ep->active) continue;
1115 
1116 	spin_lock(&ep->lock);
1117 
1118 		if (ep->direction == USB_DIR_IN) {
1119 			buff_done = get_dma_buffer_done(ep->indma);
1120 			if (buff_done != 0) {
1121 				// transmitted a DATAx pkt on the IN ep
1122 		// clear DMA done bit
1123 		if (buff_done & DMA_D0)
1124 			clear_dma_done0(ep->indma);
1125 		if (buff_done & DMA_D1)
1126 			clear_dma_done1(ep->indma);
1127 
1128 				pkt = send_packet_complete(ep);
1129 
1130 				spin_unlock(&ep->lock);
1131 				dev->func_cb(CB_PKT_COMPLETE,
1132 					     (unsigned long)pkt,
1133 					     dev->cb_data);
1134 				spin_lock(&ep->lock);
1135 			}
1136 		} else {
1137 	/*
1138 			 * Check packet receive done (OUT ep). Shouldn't get
1139 			 * these, the rx packet complete intr should happen
1140 	 * before the DMA done intr occurs.
1141 	 */
1142 			buff_done = get_dma_buffer_done(ep->outdma);
1143 			if (buff_done != 0) {
1144 				// received a DATAx pkt on the OUT ep
1145 		// clear DMA done bit
1146 		if (buff_done & DMA_D0)
1147 			clear_dma_done0(ep->outdma);
1148 		if (buff_done & DMA_D1)
1149 			clear_dma_done1(ep->outdma);
1150 
1151 				//process_ep_receive(dev, ep);
1152 	}
1153 	}
1154 
1155 		spin_unlock(&ep->lock);
1156 	}
1157 }
1158 
1159 
1160 /***************************************************************************
1161  * Here begins the external interface functions
1162  ***************************************************************************
1163  */
1164 
1165 /*
1166  * allocate a new packet
1167  */
1168 int
usbdev_alloc_packet(int ep_addr,int data_size,usbdev_pkt_t ** pkt)1169 usbdev_alloc_packet(int ep_addr, int data_size, usbdev_pkt_t** pkt)
1170 {
1171 	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1172 	usbdev_pkt_t* lpkt = NULL;
1173 
1174 	if (!ep || !ep->active || ep->address < 2)
1175 		return -ENODEV;
1176 	if (data_size > ep->max_pkt_size)
1177 		return -EINVAL;
1178 
1179 	lpkt = *pkt = alloc_packet(ep, data_size, NULL);
1180 	if (!lpkt)
1181 		return -ENOMEM;
1182 	return 0;
1183 }
1184 
1185 
1186 /*
1187  * packet send
1188  */
1189 int
usbdev_send_packet(int ep_addr,usbdev_pkt_t * pkt)1190 usbdev_send_packet(int ep_addr, usbdev_pkt_t * pkt)
1191 {
1192 	unsigned long flags;
1193 	int count;
1194 	endpoint_t * ep;
1195 
1196 	if (!pkt || !(ep = epaddr_to_ep(&usbdev, pkt->ep_addr)) ||
1197 	    !ep->active || ep->address < 2)
1198 		return -ENODEV;
1199 	if (ep->direction != USB_DIR_IN)
1200 		return -EINVAL;
1201 
1202 	spin_lock_irqsave(&ep->lock, flags);
1203 	count = send_packet(&usbdev, pkt, 1);
1204 	spin_unlock_irqrestore(&ep->lock, flags);
1205 
1206 	return count;
1207 }
1208 
1209 /*
1210  * packet receive
1211  */
1212 int
usbdev_receive_packet(int ep_addr,usbdev_pkt_t ** pkt)1213 usbdev_receive_packet(int ep_addr, usbdev_pkt_t** pkt)
1214 {
1215 	unsigned long flags;
1216 	usbdev_pkt_t* lpkt = NULL;
1217 	endpoint_t *ep = epaddr_to_ep(&usbdev, ep_addr);
1218 
1219 	if (!ep || !ep->active || ep->address < 2)
1220 		return -ENODEV;
1221 	if (ep->direction != USB_DIR_OUT)
1222 		return -EINVAL;
1223 
1224 	spin_lock_irqsave(&ep->lock, flags);
1225 	if (ep->outlist.count > 1)
1226 		lpkt = unlink_head(&ep->outlist);
1227 	spin_unlock_irqrestore(&ep->lock, flags);
1228 
1229 	if (!lpkt) {
1230 		/* no packet available */
1231 		*pkt = NULL;
1232 		return -ENODATA;
1233 	}
1234 
1235 	*pkt = lpkt;
1236 
1237 	return lpkt->size;
1238 }
1239 
1240 
1241 /*
1242  * return total queued byte count on the endpoint.
1243  */
1244 int
usbdev_get_byte_count(int ep_addr)1245 usbdev_get_byte_count(int ep_addr)
1246 {
1247         unsigned long flags;
1248         pkt_list_t *list;
1249         usbdev_pkt_t *scan;
1250         int count = 0;
1251 	endpoint_t * ep = epaddr_to_ep(&usbdev, ep_addr);
1252 
1253 	if (!ep || !ep->active || ep->address < 2)
1254 		return -ENODEV;
1255 
1256 	if (ep->direction == USB_DIR_IN) {
1257 		list = &ep->inlist;
1258 
1259 		spin_lock_irqsave(&ep->lock, flags);
1260 		for (scan = list->head; scan; scan = scan->next)
1261 			count += scan->size;
1262 		spin_unlock_irqrestore(&ep->lock, flags);
1263 	} else {
1264 		list = &ep->outlist;
1265 
1266 		spin_lock_irqsave(&ep->lock, flags);
1267 		if (list->count > 1) {
1268 			for (scan = list->head; scan != list->tail;
1269 			     scan = scan->next)
1270 				count += scan->size;
1271 	}
1272 		spin_unlock_irqrestore(&ep->lock, flags);
1273 	}
1274 
1275 	return count;
1276 }
1277 
1278 
1279 void
usbdev_exit(void)1280 usbdev_exit(void)
1281 {
1282 	endpoint_t *ep;
1283 	int i;
1284 
1285 	au_writel(0, USBD_INTEN);	// disable usb dev ints
1286 	au_writel(0, USBD_ENABLE);	// disable usb dev
1287 
1288 	free_irq(AU1000_USB_DEV_REQ_INT, &usbdev);
1289 	free_irq(AU1000_USB_DEV_SUS_INT, &usbdev);
1290 
1291 	// free all control endpoint resources
1292 	ep = &usbdev.ep[0];
1293 	free_au1000_dma(ep->indma);
1294 	free_au1000_dma(ep->outdma);
1295 	endpoint_flush(ep);
1296 
1297 	// free ep resources
1298 	for (i = 2; i < 6; i++) {
1299 		ep = &usbdev.ep[i];
1300 		if (!ep->active) continue;
1301 
1302 		if (ep->direction == USB_DIR_IN) {
1303 			free_au1000_dma(ep->indma);
1304 		} else {
1305 		free_au1000_dma(ep->outdma);
1306 		}
1307 		endpoint_flush(ep);
1308 	}
1309 
1310 	if (usbdev.full_conf_desc)
1311 		kfree(usbdev.full_conf_desc);
1312 }
1313 
/*
 * Initialize the Au1000 USB device controller from the descriptors
 * supplied by the function layer.
 *
 * dev_desc/config_desc/if_desc/ep_desc: device, configuration,
 *   interface, and endpoint descriptor templates.  Exactly one config
 *   and one interface with at most 4 endpoints are supported; endpoint
 *   addresses in ep_desc are rewritten to the hardware endpoints
 *   assigned here (IN -> 2,3; OUT -> 4,5).
 * str_desc: string descriptor table; exactly 6 entries are copied —
 *   NOTE(review): callers must provide at least 6, confirm.
 * cb/cb_data: mandatory function-layer callback and its context.
 *
 * Returns 0 on success or a negative errno.  On failure usbdev_exit()
 * is called — NOTE(review): early failures reach usbdev_exit() before
 * the IRQs/DMA channels were requested; confirm that freeing them is
 * harmless.
 */
int
usbdev_init(struct usb_device_descriptor* dev_desc,
	    struct usb_config_descriptor* config_desc,
	    struct usb_interface_descriptor* if_desc,
	    struct usb_endpoint_descriptor* ep_desc,
	    struct usb_string_descriptor* str_desc[],
	    void (*cb)(usbdev_cb_type_t, unsigned long, void *),
	    void* cb_data)
{
	endpoint_t *ep0;
	int i, ret=0;
	u8* fcd;

	/* Hardware limits: one config, one interface, max 4 data ep's. */
	if (dev_desc->bNumConfigurations > 1 ||
	    config_desc->bNumInterfaces > 1 ||
	    if_desc->bNumEndpoints > 4) {
		err("Only one config, one i/f, and no more "
		    "than 4 ep's allowed");
		ret = -EINVAL;
		goto out;
	}

	if (!cb) {
		err("Function-layer callback required");
		ret = -EINVAL;
		goto out;
	}

	/* EP0 max packet size is fixed by the controller; override the
	 * caller's value rather than failing. */
	if (dev_desc->bMaxPacketSize0 != USBDEV_EP0_MAX_PACKET_SIZE) {
		warn("EP0 Max Packet size must be %d",
		     USBDEV_EP0_MAX_PACKET_SIZE);
		dev_desc->bMaxPacketSize0 = USBDEV_EP0_MAX_PACKET_SIZE;
	}

	memset(&usbdev, 0, sizeof(struct usb_dev));

	usbdev.state = DEFAULT;
	usbdev.dev_desc = dev_desc;
	usbdev.if_desc = if_desc;
	usbdev.conf_desc = config_desc;
	/* NOTE(review): assumes str_desc[] has 6 entries — confirm. */
	for (i=0; i<6; i++)
		usbdev.str_desc[i] = str_desc[i];
	usbdev.func_cb = cb;
	usbdev.cb_data = cb_data;

	/* Initialize default control endpoint */
	ep0 = &usbdev.ep[0];
	ep0->active = 1;
	ep0->type = CONTROL_EP;
	ep0->max_pkt_size = USBDEV_EP0_MAX_PACKET_SIZE;
	spin_lock_init(&ep0->lock);
	ep0->desc = NULL;	// ep0 has no descriptor
	ep0->address = 0;
	ep0->direction = 0;
	ep0->reg = &ep_reg[0];

	/* Initialize the other requested endpoints: IN descriptors get
	 * hardware ep 2 then 3, OUT descriptors get ep 4 then 5. */
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		struct usb_endpoint_descriptor* epd = &ep_desc[i];
	endpoint_t *ep;

		if ((epd->bEndpointAddress & 0x80) == USB_DIR_IN) {
			ep = &usbdev.ep[2];
			ep->address = 2;
			if (ep->active) {
				ep = &usbdev.ep[3];
				ep->address = 3;
				if (ep->active) {
					err("too many IN ep's requested");
					ret = -ENODEV;
					goto out;
	}
	}
		} else {
			ep = &usbdev.ep[4];
			ep->address = 4;
			if (ep->active) {
				ep = &usbdev.ep[5];
				ep->address = 5;
				if (ep->active) {
					err("too many OUT ep's requested");
					ret = -ENODEV;
					goto out;
	}
	}
		}

		ep->active = 1;
		/* Rewrite the descriptor's ep number (low 4 bits) to the
		 * hardware endpoint assigned above; direction bit kept. */
		epd->bEndpointAddress &= ~0x0f;
		epd->bEndpointAddress |= (u8)ep->address;
		ep->direction = epd->bEndpointAddress & 0x80;
		ep->type = epd->bmAttributes & 0x03;
		ep->max_pkt_size = epd->wMaxPacketSize;
		spin_lock_init(&ep->lock);
		ep->desc = epd;
		ep->reg = &ep_reg[ep->address];
		}

	/*
	 * initialize the full config descriptor: config + interface +
	 * endpoint descriptors concatenated, as returned to the host
	 * for GET_DESCRIPTOR(CONFIG).
	 */
	usbdev.full_conf_desc = fcd = kmalloc(config_desc->wTotalLength,
					      ALLOC_FLAGS);
	if (!fcd) {
		err("failed to alloc full config descriptor");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(fcd, config_desc, USB_DT_CONFIG_SIZE);
	fcd += USB_DT_CONFIG_SIZE;
	memcpy(fcd, if_desc, USB_DT_INTERFACE_SIZE);
	fcd += USB_DT_INTERFACE_SIZE;
	for (i = 0; i < if_desc->bNumEndpoints; i++) {
		memcpy(fcd, &ep_desc[i], USB_DT_ENDPOINT_SIZE);
		fcd += USB_DT_ENDPOINT_SIZE;
	}

	/* Now we're ready to enable the controller: two-step enable with
	 * settle delays (per original driver; exact meaning of the bits
	 * should be confirmed against the Au1000 USBD documentation). */
	au_writel(0x0002, USBD_ENABLE);
	udelay(100);
	au_writel(0x0003, USBD_ENABLE);
	udelay(100);

	/* build and send config table based on ep descriptors; inactive
	 * endpoints still get a default bulk-ep entry. */
	for (i = 0; i < 6; i++) {
		endpoint_t *ep;
		if (i == 1)
			continue; // skip dummy ep
		ep = &usbdev.ep[i];
		if (ep->active) {
			au_writel((ep->address << 4) | 0x04, USBD_CONFIG);
			au_writel(((ep->max_pkt_size & 0x380) >> 7) |
				  (ep->direction >> 4) | (ep->type << 4),
				  USBD_CONFIG);
			au_writel((ep->max_pkt_size & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(ep->address, USBD_CONFIG);
		} else {
			u8 dir = (i==2 || i==3) ? DIR_IN : DIR_OUT;
			au_writel((i << 4) | 0x04, USBD_CONFIG);
			au_writel(((16 & 0x380) >> 7) | dir |
				  (BULK_EP << 4), USBD_CONFIG);
			au_writel((16 & 0x7f) << 1, USBD_CONFIG);
			au_writel(0x00, USBD_CONFIG);
			au_writel(i, USBD_CONFIG);
		}
	}

	/*
	 * Enable Receive FIFO Complete interrupts only. Transmit
	 * complete is being handled by the DMA done interrupts.
	 */
	au_writel(0x31, USBD_INTEN);

	/*
	 * Controller is now enabled, request DMA and IRQ
	 * resources.
	 */

	/* request the USB device transfer complete interrupt */
	if (request_irq(AU1000_USB_DEV_REQ_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev req", &usbdev)) {
		err("Can't get device request intr");
		ret = -ENXIO;
		goto out;
	}
	/* request the USB device suspend interrupt */
	if (request_irq(AU1000_USB_DEV_SUS_INT, req_sus_intr, SA_INTERRUPT,
			"USBdev sus", &usbdev)) {
		err("Can't get device suspend intr");
		ret = -ENXIO;
		goto out;
	}

	/* Request EP0 DMA and IRQ; only the IN (transmit) channel gets a
	 * DMA-done handler, the OUT channel is serviced by req_sus_intr. */
	if ((ep0->indma = request_au1000_dma(ep_dma_id[0].id,
					     ep_dma_id[0].str,
					     dma_done_ep0_intr,
					     SA_INTERRUPT,
					     &usbdev)) < 0) {
		err("Can't get %s DMA", ep_dma_id[0].str);
		ret = -ENXIO;
		goto out;
	}
	if ((ep0->outdma = request_au1000_dma(ep_dma_id[1].id,
					      ep_dma_id[1].str,
					      NULL, 0, NULL)) < 0) {
		err("Can't get %s DMA", ep_dma_id[1].str);
		ret = -ENXIO;
		goto out;
	}

	// Flush the ep0 buffers and FIFOs
	endpoint_flush(ep0);
	// start packet reception on ep0
	kickstart_receive_packet(ep0);

	/* Request DMA and IRQ for the other endpoints */
	for (i = 2; i < 6; i++) {
		endpoint_t *ep = &usbdev.ep[i];
		if (!ep->active)
			continue;

		// Flush the endpoint buffers and FIFOs
		endpoint_flush(ep);

		if (ep->direction == USB_DIR_IN) {
			ep->indma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   dma_done_ep_intr,
						   SA_INTERRUPT,
						   &usbdev);
			if (ep->indma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}
		} else {
			ep->outdma =
				request_au1000_dma(ep_dma_id[ep->address].id,
						   ep_dma_id[ep->address].str,
						   NULL, 0, NULL);
			if (ep->outdma < 0) {
				err("Can't get %s DMA",
				    ep_dma_id[ep->address].str);
				ret = -ENXIO;
				goto out;
			}

			// start packet reception on OUT endpoint
			kickstart_receive_packet(ep);
		}
	}

 out:
	if (ret)
		usbdev_exit();
	return ret;
}
1556 
/* Public device-layer API exported for use by function-layer modules. */
EXPORT_SYMBOL(usbdev_init);
EXPORT_SYMBOL(usbdev_exit);
EXPORT_SYMBOL(usbdev_alloc_packet);
EXPORT_SYMBOL(usbdev_receive_packet);
EXPORT_SYMBOL(usbdev_send_packet);
EXPORT_SYMBOL(usbdev_get_byte_count);
1563