1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  *
17  * Intel documents this fairly well, and as far as I know there
18  * are no royalties or anything like that, but even so there are
19  * people who decided that they want to do the same thing in a
20  * completely different way.
21  *
22  * WARNING! The USB documentation is downright evil. Most of it
23  * is just crap, written by a committee. You're better off ignoring
24  * most of it, the important stuff is:
25  *  - the low-level protocol (fairly simple but lots of small details)
26  *  - working around the horridness of the rest
27  */
28 
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/kernel.h>
33 #include <linux/init.h>
34 #include <linux/delay.h>
35 #include <linux/ioport.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/smp_lock.h>
39 #include <linux/errno.h>
40 #include <linux/unistd.h>
41 #include <linux/interrupt.h>
42 #include <linux/spinlock.h>
43 #include <linux/proc_fs.h>
44 #ifdef CONFIG_USB_DEBUG
45 #define DEBUG
46 #else
47 #undef DEBUG
48 #endif
49 #include <linux/usb.h>
50 
51 #include <asm/uaccess.h>
52 #include <asm/io.h>
53 #include <asm/irq.h>
54 #include <asm/system.h>
55 
56 #include "uhci.h"
57 
58 #include <linux/pm.h>
59 
60 #include "../hcd.h"
61 
62 /*
63  * Version Information
64  */
65 #define DRIVER_VERSION "v1.1"
66 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
67 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
68 
69 /*
70  * debug = 0, no debugging messages
71  * debug = 1, dump failed URB's except for stalls
72  * debug = 2, dump all failed URB's (including stalls)
73  *            show all queues in /proc/uhci/hc*
74  * debug = 3, show all TD's in URB's when dumping
75  */
76 #ifdef DEBUG
77 static int debug = 1;
78 #else
79 static int debug = 0;
80 #endif
81 MODULE_PARM(debug, "i");
82 MODULE_PARM_DESC(debug, "Debug level");
83 static char *errbuf;
84 #define ERRBUF_LEN    (PAGE_SIZE * 8)
85 
86 #include "uhci-debug.h"
87 
88 static kmem_cache_t *uhci_up_cachep;	/* urb_priv */
89 
90 static int rh_submit_urb(struct urb *urb);
91 static int rh_unlink_urb(struct urb *urb);
92 static int uhci_get_current_frame_number(struct usb_device *dev);
93 static int uhci_unlink_urb(struct urb *urb);
94 static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb);
95 static void uhci_call_completion(struct urb *urb);
96 
97 static int  ports_active(struct uhci *uhci);
98 static void suspend_hc(struct uhci *uhci);
99 static void wakeup_hc(struct uhci *uhci);
100 
101 /* If a transfer is still active after this much time, turn off FSBR */
102 #define IDLE_TIMEOUT	(HZ / 20)	/* 50 ms */
103 #define FSBR_DELAY	(HZ / 20)	/* 50 ms */
104 
105 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
106 /* depth first traversal. We'll do it in groups of this number of TD's */
107 /* to make sure it doesn't hog all of the bandwidth */
108 #define DEPTH_INTERVAL	5
109 
110 #define MAX_URB_LOOP	2048		/* Maximum number of linked URB's */
111 
112 /*
113  * Only the USB core should call uhci_alloc_dev and uhci_free_dev
114  */
/* Per-device allocation hook for the USB core; this HCD keeps no
 * per-device state, so there is nothing to set up. Always succeeds. */
static int uhci_alloc_dev(struct usb_device *dev)
{
	return 0;
}
119 
/* Per-device teardown hook for the USB core; nothing was allocated in
 * uhci_alloc_dev(), so there is nothing to free. Always succeeds. */
static int uhci_free_dev(struct usb_device *dev)
{
	return 0;
}
124 
125 /*
126  * Technically, updating td->status here is a race, but it's not really a
127  * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
129  * QH and leaving the IOC bit always set, but then we would have to play
130  * games with the FSBR code to make sure we get the correct order in all
131  * the cases. I don't think it's worth the effort
132  */
static inline void uhci_set_next_interrupt(struct uhci *uhci)
{
	unsigned long flags;

	/* Ask for an interrupt at the next frame by setting IOC on the
	 * terminating TD of the schedule skeleton (see race note above) */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->skel_term_td->status |= TD_CTRL_IOC;
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
141 
static inline void uhci_clear_next_interrupt(struct uhci *uhci)
{
	unsigned long flags;

	/* Cancel the per-frame interrupt requested by
	 * uhci_set_next_interrupt() by clearing IOC on the skeleton's
	 * terminating TD */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	uhci->skel_term_td->status &= ~TD_CTRL_IOC;
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
150 
/* Queue the URB on the controller's complete list, protected by
 * complete_list_lock; uhci_call_completion() consumes it later */
static inline void uhci_add_complete(struct urb *urb)
{
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	list_add_tail(&urbp->complete_list, &uhci->complete_list);
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
161 
uhci_alloc_td(struct uhci * uhci,struct usb_device * dev)162 static struct uhci_td *uhci_alloc_td(struct uhci *uhci, struct usb_device *dev)
163 {
164 	dma_addr_t dma_handle;
165 	struct uhci_td *td;
166 
167 	td = pci_pool_alloc(uhci->td_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
168 	if (!td)
169 		return NULL;
170 
171 	td->dma_handle = dma_handle;
172 
173 	td->link = UHCI_PTR_TERM;
174 	td->buffer = 0;
175 
176 	td->frame = -1;
177 	td->dev = dev;
178 
179 	INIT_LIST_HEAD(&td->list);
180 	INIT_LIST_HEAD(&td->fl_list);
181 
182 	usb_inc_dev_use(dev);
183 
184 	return td;
185 }
186 
/*
 * Fill in the control/status word, token word and buffer pointer of a
 * TD.  The caller must only make the TD visible to the host controller
 * (by linking it) after it has been completely filled in.
 */
static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
		__u32 info, __u32 buffer)
{
	td->status = status;
	td->info = info;
	td->buffer = buffer;
}
194 
/* Append <td> to the end of a skeleton TD's frame list */
static void uhci_insert_td(struct uhci *uhci, struct uhci_td *skeltd, struct uhci_td *td)
{
	unsigned long flags;
	struct uhci_td *ltd;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Current tail of the skeleton's frame list */
	ltd = list_entry(skeltd->fl_list.prev, struct uhci_td, fl_list);

	/* Hardware-visible order matters: the new TD must inherit the old
	 * tail's link before the tail is pointed at it, so the HC never
	 * sees a half-linked TD */
	td->link = ltd->link;
	mb();
	ltd->link = td->dma_handle;

	list_add_tail(&td->fl_list, &skeltd->fl_list);

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
212 
213 /*
214  * We insert Isochronous transfers directly into the frame list at the
215  * beginning
216  * The layout looks as follows:
217  * frame list pointer -> iso td's (if any) ->
218  * periodic interrupt td (if frame 0) -> irq td's -> control qh -> bulk qh
219  */
static void uhci_insert_td_frame_list(struct uhci *uhci, struct uhci_td *td, unsigned framenum)
{
	unsigned long flags;

	/* Frame numbers wrap around the schedule */
	framenum %= UHCI_NUMFRAMES;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* Yes: append behind the last TD already in that frame */
		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		/* Link in hardware-visible order: the new TD inherits the
		 * old tail's link before the tail points at it */
		td->link = ltd->link;
		mb();
		ltd->link = td->dma_handle;
	} else {
		/* No: this TD becomes the head of the frame */
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = td->dma_handle;
		uhci->fl->frame_cpu[framenum] = td;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
251 
/* Unlink a TD from the frame list (no-op if it was never inserted) */
static void uhci_remove_td(struct uhci *uhci, struct uhci_td *td)
{
	unsigned long flags;

	/* If it's not inserted, don't remove it */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (td->frame == -1 && list_empty(&td->fl_list))
		goto out;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* td heads its frame */
		if (list_empty(&td->fl_list)) {
			/* Only TD in the frame: frame becomes whatever td
			 * pointed at */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			/* Promote the next TD to frame head */
			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = ntd->dma_handle;
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		/* Mid-list: make the previous TD skip over td */
		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	/* Ensure the unlink is visible before poisoning td's link */
	mb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
288 
289 /*
290  * Inserts a td into qh list at the top.
291  */
uhci_insert_tds_in_qh(struct uhci_qh * qh,struct urb * urb,int breadth)292 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, int breadth)
293 {
294 	struct list_head *tmp, *head;
295 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
296 	struct uhci_td *td, *ptd;
297 
298 	if (list_empty(&urbp->td_list))
299 		return;
300 
301 	head = &urbp->td_list;
302 	tmp = head->next;
303 
304 	/* Ordering isn't important here yet since the QH hasn't been */
305 	/*  inserted into the schedule yet */
306 	td = list_entry(tmp, struct uhci_td, list);
307 
308 	/* Add the first TD to the QH element pointer */
309 	qh->element = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
310 
311 	ptd = td;
312 
313 	/* Then link the rest of the TD's */
314 	tmp = tmp->next;
315 	while (tmp != head) {
316 		td = list_entry(tmp, struct uhci_td, list);
317 
318 		tmp = tmp->next;
319 
320 		ptd->link = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
321 
322 		ptd = td;
323 	}
324 
325 	ptd->link = UHCI_PTR_TERM;
326 }
327 
static void uhci_free_td(struct uhci *uhci, struct uhci_td *td)
{
	/* Should be off both the URB's TD list and the frame list by now;
	 * complain (but continue) if not */
	if (!list_empty(&td->list) || !list_empty(&td->fl_list))
		dbg("td is still in URB list!");

	/* Drop the device reference taken in uhci_alloc_td() */
	if (td->dev)
		usb_dec_dev_use(td->dev);

	pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
338 
uhci_alloc_qh(struct uhci * uhci,struct usb_device * dev)339 static struct uhci_qh *uhci_alloc_qh(struct uhci *uhci, struct usb_device *dev)
340 {
341 	dma_addr_t dma_handle;
342 	struct uhci_qh *qh;
343 
344 	qh = pci_pool_alloc(uhci->qh_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
345 	if (!qh)
346 		return NULL;
347 
348 	qh->dma_handle = dma_handle;
349 
350 	qh->element = UHCI_PTR_TERM;
351 	qh->link = UHCI_PTR_TERM;
352 
353 	qh->dev = dev;
354 	qh->urbp = NULL;
355 
356 	INIT_LIST_HEAD(&qh->list);
357 	INIT_LIST_HEAD(&qh->remove_list);
358 
359 	usb_inc_dev_use(dev);
360 
361 	return qh;
362 }
363 
static void uhci_free_qh(struct uhci *uhci, struct uhci_qh *qh)
{
	/* Should be unlinked from the schedule and off the remove list by
	 * now; complain (but continue) if not */
	if (!list_empty(&qh->list))
		dbg("qh list not empty!");
	if (!list_empty(&qh->remove_list))
		dbg("qh still in remove_list!");

	/* Drop the device reference taken in uhci_alloc_qh() */
	if (qh->dev)
		usb_dec_dev_use(qh->dev);

	pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
376 
377 /*
378  * MUST be called with uhci->frame_list_lock acquired
379  */
/* Link the URB's QH onto the end of the skeleton QH's list */
static void _uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head, *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* If the old tail QH has queued URBs behind it, point each of
	 * their QHs at the QH being inserted */
	if (lqh->urbp) {
		head = &lqh->urbp->queue_list;
		tmp = head->next;
		while (head != tmp) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			tmp = tmp->next;

			turbp->qh->link = urbp->qh->dma_handle | UHCI_PTR_QH;
		}
	}

	/* URBs queued behind the one being inserted inherit the old
	 * tail's link so they exit the schedule at the right place */
	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		tmp = tmp->next;

		turbp->qh->link = lqh->link;
	}

	/* New QH inherits the old tail's link before becoming reachable */
	urbp->qh->link = lqh->link;
	mb();				/* Ordering is important */
	lqh->link = urbp->qh->dma_handle | UHCI_PTR_QH;

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
419 
/* Lock-taking wrapper around _uhci_insert_qh() */
static void uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	_uhci_insert_qh(uhci, skelqh, urb);
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
428 
/* Unlink a QH from the schedule and park it on the remove list; the
 * actual free happens later, after the HC has stopped touching it */
static void uhci_remove_qh(struct uhci *uhci, struct uhci_qh *qh)
{
	unsigned long flags;
	struct uhci_qh *pqh;

	if (!qh)
		return;

	qh->urbp = NULL;

	/* Only go through the hoops if it's actually linked in */
	spin_lock_irqsave(&uhci->frame_list_lock, flags);
	if (!list_empty(&qh->list)) {
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);

		/* Fix up any queued URBs on the previous QH that still
		 * point at the QH being removed */
		if (pqh->urbp) {
			struct list_head *head, *tmp;

			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = qh->link;
			}
		}

		/* Bypass the QH, then terminate it only once unreachable */
		pqh->link = qh->link;
		mb();
		qh->element = qh->link = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);

	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);

	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
478 
uhci_fixup_toggle(struct urb * urb,unsigned int toggle)479 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
480 {
481 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
482 	struct list_head *head, *tmp;
483 
484 	head = &urbp->td_list;
485 	tmp = head->next;
486 	while (head != tmp) {
487 		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
488 
489 		tmp = tmp->next;
490 
491 		if (toggle)
492 			td->info |= TD_TOKEN_TOGGLE;
493 		else
494 			td->info &= ~TD_TOKEN_TOGGLE;
495 
496 		toggle ^= 1;
497 	}
498 
499 	return toggle;
500 }
501 
502 /* This function will append one URB's QH to another URB's QH. This is for */
/*  USB_QUEUE_BULK support for bulk transfers and soon implicitly for */
504 /*  control transfers */
static void uhci_append_queued_urb(struct uhci *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;
	unsigned long flags;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		struct list_head *head = &eurbp->queue_list;

		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			/* The queue head is the one entry with queued == 0 */
			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	/* Last TD of the last URB currently in the queue */
	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Continue the data-toggle sequence from where the queue left off */
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
		uhci_fixup_toggle(urb, uhci_toggle(lltd->info) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();			/* Make sure we flush everything */
	/* Only support bulk right now, so no depth */
	lltd->link = urbp->qh->dma_handle | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
555 
/* Remove an URB from a bulk queue, repairing the toggle sequence and
 * hardware links of the URBs that remain */
static void uhci_delete_queued_urb(struct uhci *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;
	unsigned long flags;

	urbp = urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Not part of a queue: nothing to unlink */
	if (list_empty(&urbp->queue_list))
		goto out;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/* Fix up the toggle for the next URB's */
	if (!urbp->queued)
		/* We set the toggle when we unlink */
		toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
	else {
		/* If we're in the middle of the queue, grab the toggle */
		/*  from the TD previous to us */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);

		toggle = uhci_toggle(pltd->info) ^ 1;
	}

	/* Rewrite the toggles of every URB that follows us in the queue */
	head = &urbp->queue_list;
	tmp = head->next;
	while (head != tmp) {
		struct urb_priv *turbp;

		turbp = list_entry(tmp, struct urb_priv, queue_list);

		tmp = tmp->next;

		/* Stop when we wrap around to the queue head */
		if (!turbp->queued)
			break;

		toggle = uhci_fixup_toggle(turbp->urb, toggle);
	}

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
		usb_pipeout(urb->pipe), toggle);

	if (!urbp->queued) {
		/* We were the queue head: promote the next URB and link
		 * its QH into the bulk schedule */
		nurbp->queued = 0;

		_uhci_insert_qh(uhci, uhci->skel_bulk_qh, nurbp->urb);
	} else {
		/* We're somewhere in the middle (or end). A bit trickier */
		/*  than the head scenario */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = nurbp->qh->dma_handle | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/*  we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	list_del_init(&urbp->queue_list);

out:
	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
631 
uhci_alloc_urb_priv(struct uhci * uhci,struct urb * urb)632 static struct urb_priv *uhci_alloc_urb_priv(struct uhci *uhci, struct urb *urb)
633 {
634 	struct urb_priv *urbp;
635 
636 	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
637 	if (!urbp) {
638 		err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
639 		return NULL;
640 	}
641 
642 	memset((void *)urbp, 0, sizeof(*urbp));
643 
644 	urbp->inserttime = jiffies;
645 	urbp->fsbrtime = jiffies;
646 	urbp->urb = urb;
647 	urbp->dev = urb->dev;
648 
649 	INIT_LIST_HEAD(&urbp->td_list);
650 	INIT_LIST_HEAD(&urbp->queue_list);
651 	INIT_LIST_HEAD(&urbp->complete_list);
652 
653 	urb->hcpriv = urbp;
654 
655 	if (urb->dev != uhci->rh.dev) {
656 		if (urb->transfer_buffer_length) {
657 			urbp->transfer_buffer_dma_handle = pci_map_single(uhci->dev,
658 				urb->transfer_buffer, urb->transfer_buffer_length,
659 				usb_pipein(urb->pipe) ? PCI_DMA_FROMDEVICE :
660 				PCI_DMA_TODEVICE);
661 			if (!urbp->transfer_buffer_dma_handle)
662 				return NULL;
663 		}
664 
665 		if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
666 			urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
667 				urb->setup_packet, sizeof(struct usb_ctrlrequest),
668 				PCI_DMA_TODEVICE);
669 			if (!urbp->setup_packet_dma_handle)
670 				return NULL;
671 		}
672 	}
673 
674 	return urbp;
675 }
676 
677 /*
678  * MUST be called with urb->lock acquired
679  */
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	/* Associate the TD with the URB and append it to the URB's TD list */
	td->urb = urb;

	list_add_tail(&td->list, &urbp->td_list);
}
688 
689 /*
690  * MUST be called with urb->lock acquired
691  */
uhci_remove_td_from_urb(struct uhci_td * td)692 static void uhci_remove_td_from_urb(struct uhci_td *td)
693 {
694 	if (list_empty(&td->list))
695 		return;
696 
697 	list_del_init(&td->list);
698 
699 	td->urb = NULL;
700 }
701 
702 /*
703  * MUST be called with urb->lock acquired
704  */
/* Tear down everything uhci_alloc_urb_priv() (and TD/QH setup) built:
 * free all TDs, drop the DMA mappings and release the urb_priv */
static void uhci_destroy_urb_priv(struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;
	struct uhci *uhci;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	/* The device and bus must still exist to reach the uhci struct */
	if (!urbp->dev || !urbp->dev->bus || !urbp->dev->bus->hcpriv) {
		warn("uhci_destroy_urb_priv: urb %p belongs to disconnected device or bus?", urb);
		return;
	}

	if (!list_empty(&urb->urb_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);

	if (!list_empty(&urbp->complete_list))
		warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);

	uhci = urbp->dev->bus->hcpriv;

	/* Unlink and free every TD belonging to this URB */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	/* Undo the DMA mappings created in uhci_alloc_urb_priv() */
	if (urbp->setup_packet_dma_handle) {
		pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
			sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
		urbp->setup_packet_dma_handle = 0;
	}

	if (urbp->transfer_buffer_dma_handle) {
		pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
			urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
			PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
		urbp->transfer_buffer_dma_handle = 0;
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
756 
/* Mark this URB as using Full Speed Bandwidth Reclamation (FSBR) */
static void uhci_inc_fsbr(struct uhci *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* First FSBR user with no pending turn-off timeout: loop the
	 * terminating QH back to the high-speed control queue */
	if ((!(urb->transfer_flags & USB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
772 
/* Drop this URB's Full Speed Bandwidth Reclamation (FSBR) usage */
static void uhci_dec_fsbr(struct uhci *uhci, struct urb *urb)
{
	unsigned long flags;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	spin_lock_irqsave(&uhci->frame_list_lock, flags);

	/* Last FSBR user gone: arm the delayed turn-off rather than
	 * unlinking here (presumably the timeout path, outside this
	 * chunk, breaks the loop -- verify against the rest of file) */
	if ((!(urb->transfer_flags & USB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}

	spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
788 
789 /*
790  * Map status to standard result codes
791  *
792  * <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
793  * <dir_out> is True for output TDs and False for input TDs.
794  */
uhci_map_status(int status,int dir_out)795 static int uhci_map_status(int status, int dir_out)
796 {
797 	if (!status)
798 		return 0;
799 	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
800 		return -EPROTO;
801 	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
802 		if (dir_out)
803 			return -ETIMEDOUT;
804 		else
805 			return -EILSEQ;
806 	}
807 	if (status & TD_CTRL_NAK)			/* NAK */
808 		return -ETIMEDOUT;
809 	if (status & TD_CTRL_BABBLE)			/* Babble */
810 		return -EOVERFLOW;
811 	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
812 		return -ENOSR;
813 	if (status & TD_CTRL_STALLED)			/* Stalled */
814 		return -EPIPE;
815 	if (status & TD_CTRL_ACTIVE)			/* Active */
816 		return 0;
817 
818 	return -EINVAL;
819 }
820 
821 /*
822  * Control transfers
823  */
/*
 * Build and schedule the TD/QH chain for a control transfer: one SETUP
 * TD, zero or more DATA TDs, and a final STATUS TD.  Returns
 * -EINPROGRESS on success (completion is reported asynchronously), or
 * -ENOMEM if a TD/QH allocation fails.  NOTE(review): on failure the
 * already-allocated TDs remain on urbp->td_list; presumably the caller
 * reclaims them via uhci_destroy_urb_priv() -- confirm against the
 * submit path elsewhere in this file.
 */
static int uhci_submit_control(struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urbp->transfer_buffer_dma_handle;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | (3 << 27);

	/*
	 * Build the TD for the control request
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	/* 7 << 21: 8-byte setup packet, encoded as length - 1 */
	uhci_fill_td(td, status, destination | (7 << 21),
		urbp->setup_packet_dma_handle);

	/*
	 * If direction is "send", change the frame from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69).
	 */
	destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));

	if (!(urb->transfer_flags & USB_DISABLE_SPD))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | ((pktsze - 1) << 21),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	/* No short-packet detect on the status stage */
	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | (UHCI_NULL_DATA_SIZE << 21), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Low speed or small transfers gets a different queue and treatment */
	if (urb->pipe & TD_CTRL_LS) {
		uhci_insert_tds_in_qh(qh, urb, 0);
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	} else {
		uhci_insert_tds_in_qh(qh, urb, 1);
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
		uhci_inc_fsbr(uhci, urb);
	}

	return -EINPROGRESS;
}
929 
930 static int usb_control_retrigger_status(struct urb *urb);
931 
/*
 * Walk a control URB's TD list and translate hardware status into an
 * URB result: -EINPROGRESS while any TD is still active, 0 on success,
 * or a mapped error code.  A short IN packet re-queues just the status
 * stage via usb_control_retrigger_status().
 */
static int uhci_result_control(struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	/* After a short packet only the status TD is left -- go there */
	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_phase;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP phase, check the status, but skip */
	/*  the count */
	status = uhci_status_bits(td->status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td->status);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td->status);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td->status) < uhci_expected_length(td->info)) {
			if (urb->transfer_flags & USB_DISABLE_SPD) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* Short IN: skip straight to the status stage;
			 * a short OUT needs no special handling */
			if (uhci_packetid(td->info) == USB_PID_IN)
				return usb_control_retrigger_status(urb);
			else
				return 0;
		}
	}

status_phase:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status phase */
	status = uhci_status_bits(td->status);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/*  we requested */
	if (td->status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td->info));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td->info),
	    			uhci_packetout(td->info));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_control() failed with status %x", status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
1040 
/*
 * A control read ended with a short packet: throw away the remaining
 * DATA TDs and re-schedule only the final status-stage TD on a brand
 * new QH so the transfer can complete cleanly.
 */
static int usb_control_retrigger_status(struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci *uhci = urb->dev->bus->hcpriv;

	/* Tell the result handler to look only at the status TD from now on */
	urbp->short_control_packet = 1;

	/* Create a new QH to avoid pointer overwriting problems */
	uhci_remove_qh(uhci, urbp->qh);

	/* Delete all of the TD's except for the status TD at the end */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head && tmp->next != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		uhci_free_td(uhci, td);
	}

	urbp->qh = uhci_alloc_qh(uhci, urb->dev);
	if (!urbp->qh) {
		err("unable to allocate new QH for control retrigger");
		return -ENOMEM;
	}

	urbp->qh->urbp = urbp;

	/* One TD, who cares about Breadth first? */
	uhci_insert_tds_in_qh(urbp->qh, urb, 0);

	/* Low speed or small transfers gets a different queue and treatment */
	if (urb->pipe & TD_CTRL_LS)
		uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);

	return -EINPROGRESS;
}
1084 
1085 /*
1086  * Interrupt transfers
1087  */
/*
 * Queue an interrupt transfer: a single TD inserted into the periodic
 * skeleton whose position is chosen from urb->interval.
 *
 * Interrupt URBs are limited to one max-packet's worth of data; larger
 * requests are rejected with -EINVAL.  Returns -EINPROGRESS on success
 * or -ENOMEM if no TD could be allocated.
 */
static int uhci_submit_interrupt(struct urb *urb)
{
	struct uhci_td *td;
	unsigned long destination, status;
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->transfer_buffer_length > usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Low-speed bit comes straight from the pipe encoding; interrupt
	 * TDs complete with an interrupt (IOC) */
	status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | TD_CTRL_IOC;

	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/* Fill in the token: current data toggle plus the maximum-length
	 * field (length - 1, shifted into bits 21+ of the token) */
	destination |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
	destination |= ((urb->transfer_buffer_length - 1) << 21);

	/* Flip the toggle now that this TD has claimed the current value */
	usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination, urbp->transfer_buffer_dma_handle);

	/* Hang the TD off the periodic skeleton entry for this interval */
	uhci_insert_td(uhci, uhci->skeltd[__interval_to_skel(urb->interval)], td);

	return -EINPROGRESS;
}
1119 
/*
 * Collect the result of an interrupt (or, via the #define below, bulk)
 * transfer by walking the URB's TD chain.
 *
 * Returns:
 *   -EINPROGRESS  if any TD is still active,
 *   0             on success (including an allowed short packet),
 *   -EREMOTEIO    on a short packet when USB_DISABLE_SPD is set,
 *   or a mapped error code if a TD reported an error status.
 */
static int uhci_result_interrupt(struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	/* Recomputed from scratch on every poll */
	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td->status);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td->status);

		if (status)
			goto td_error;

		/* Short packet: the transfer ends here */
		if (uhci_actual_length(td->status) < uhci_expected_length(td->info)) {
			if (urb->transfer_flags & USB_DISABLE_SPD) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td->info));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td->info),
	    			uhci_packetout(td->info));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dbg("uhci_result_interrupt/bulk() failed with status %x",
			status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			if (urbp->qh)
				uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
			else
				uhci_show_td(td, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
1183 
/*
 * Re-arm a completed interrupt URB so its single TD runs again on the
 * next poll interval.  The root hub is skipped entirely; its interrupt
 * endpoint is emulated by a timer, not by real TDs.
 */
static void uhci_reset_interrupt(struct urb *urb)
{
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	unsigned long flags;

	spin_lock_irqsave(&urb->lock, flags);

	/* Root hub is special */
	if (urb->dev == uhci->rh.dev)
		goto out;

	td = list_entry(urbp->td_list.next, struct uhci_td, list);

	/* Keep only bits 0x2F000000 of the old status word (presumably the
	 * configuration bits such as LS/error-count -- TODO confirm against
	 * the TD_CTRL_* layout in uhci.h) and re-activate with IOC set */
	td->status = (td->status & 0x2F000000) | TD_CTRL_ACTIVE | TD_CTRL_IOC;
	/* Refresh the data toggle in the token and advance it for next time */
	td->info &= ~TD_TOKEN_TOGGLE;
	td->info |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
	usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));

out:
	urb->status = -EINPROGRESS;

	spin_unlock_irqrestore(&urb->lock, flags);
}
1209 
1210 /*
1211  * Bulk transfers
1212  */
/*
 * Queue a bulk transfer: build a chain of data TDs (each at most one
 * max-packet long), optionally append a zero-length packet, link them
 * under a new QH and insert that QH into the schedule (or append to an
 * existing queued URB, eurb, when USB_QUEUE_BULK is used).
 *
 * Returns -EINPROGRESS on success, -EINVAL for bad length/low-speed
 * pipes, or -ENOMEM on allocation failure.
 */
static int uhci_submit_bulk(struct urb *urb, struct urb *eurb)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urbp->transfer_buffer_dma_handle;

	if (len < 0 || maxsze <= 0)
		return -EINVAL;

	/* Can't have low speed bulk transfers */
	if (urb->pipe & TD_CTRL_LS)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* 3 errors */
	status = TD_CTRL_ACTIVE | (3 << TD_CTRL_C_ERR_SHIFT);

	/* Short packets are OK unless the caller disabled them */
	if (!(urb->transfer_flags & USB_DISABLE_SPD))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Token: length-1 in the MaxLen field (bits 21+) plus the
		 * current data toggle */
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
			(((pktsze - 1) & UHCI_NULL_DATA_SIZE) << 21) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * USB_ZERO_PACKET means adding a 0-length packet, if
	 * direction is OUT and the transfer_length was an
	 * exact multiple of maxsze, hence
	 * (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet
	 * was already prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & USB_ZERO_PACKET) &&
	   !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination |
			(UHCI_NULL_DATA_SIZE << 21) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	td->status |= TD_CTRL_IOC;

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always assume breadth first */
	uhci_insert_tds_in_qh(qh, urb, 1);

	if (urb->transfer_flags & USB_QUEUE_BULK && eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, uhci->skel_bulk_qh, urb);

	/* Enable full-speed bandwidth reclamation for this URB */
	uhci_inc_fsbr(uhci, urb);

	return -EINPROGRESS;
}
1314 
1315 /* We can use the result interrupt since they're identical */
1316 #define uhci_result_bulk uhci_result_interrupt
1317 
1318 /*
1319  * Isochronous transfers
1320  */
isochronous_find_limits(struct urb * urb,unsigned int * start,unsigned int * end)1321 static int isochronous_find_limits(struct urb *urb, unsigned int *start, unsigned int *end)
1322 {
1323 	struct urb *last_urb = NULL;
1324 	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
1325 	struct list_head *tmp, *head;
1326 	int ret = 0;
1327 
1328 	head = &uhci->urb_list;
1329 	tmp = head->next;
1330 	while (tmp != head) {
1331 		struct urb *u = list_entry(tmp, struct urb, urb_list);
1332 
1333 		tmp = tmp->next;
1334 
1335 		/* look for pending URB's with identical pipe handle */
1336 		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1337 		    (u->status == -EINPROGRESS) && (u != urb)) {
1338 			if (!last_urb)
1339 				*start = u->start_frame;
1340 			last_urb = u;
1341 		}
1342 	}
1343 
1344 	if (last_urb) {
1345 		*end = (last_urb->start_frame + last_urb->number_of_packets) & 1023;
1346 		ret = 0;
1347 	} else
1348 		ret = -1;	/* no previous urb found */
1349 
1350 	return ret;
1351 }
1352 
isochronous_find_start(struct urb * urb)1353 static int isochronous_find_start(struct urb *urb)
1354 {
1355 	int limits;
1356 	unsigned int start = 0, end = 0;
1357 
1358 	if (urb->number_of_packets > 900)	/* 900? Why? */
1359 		return -EFBIG;
1360 
1361 	limits = isochronous_find_limits(urb, &start, &end);
1362 
1363 	if (urb->transfer_flags & USB_ISO_ASAP) {
1364 		if (limits) {
1365 			int curframe;
1366 
1367 			curframe = uhci_get_current_frame_number(urb->dev) % UHCI_NUMFRAMES;
1368 			urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1369 		} else
1370 			urb->start_frame = end;
1371 	} else {
1372 		urb->start_frame %= UHCI_NUMFRAMES;
1373 		/* FIXME: Sanity check */
1374 	}
1375 
1376 	return 0;
1377 }
1378 
1379 /*
1380  * Isochronous transfers
1381  */
/*
 * Queue an isochronous transfer: one TD per non-empty frame descriptor,
 * each inserted directly into the controller's frame list at successive
 * frame numbers starting at urb->start_frame.  Only the last TD raises
 * an interrupt on completion.
 *
 * Returns -EINPROGRESS on success, -ENOMEM on TD allocation failure, or
 * an error from isochronous_find_start().
 */
static int uhci_submit_isochronous(struct urb *urb)
{
	struct uhci_td *td;
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	int i, ret, framenum;
	int status, destination;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	/* IOS marks the TD isochronous: no retries, one shot per frame */
	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(urb);
	if (ret)
		return ret;

	framenum = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, framenum++) {
		/* Empty frames get no TD at all */
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* MaxLen field is length - 1, shifted into bits 21+ */
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | ((urb->iso_frame_desc[i].length - 1) << 21),
			urbp->transfer_buffer_dma_handle + urb->iso_frame_desc[i].offset);

		/* Interrupt only when the final packet completes */
		if (i + 1 >= urb->number_of_packets)
			td->status |= TD_CTRL_IOC;

		uhci_insert_td_frame_list(uhci, td, framenum);
	}

	return -EINPROGRESS;
}
1418 
uhci_result_isochronous(struct urb * urb)1419 static int uhci_result_isochronous(struct urb *urb)
1420 {
1421 	struct list_head *tmp, *head;
1422 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1423 	int status;
1424 	int i, ret = 0;
1425 
1426 	urb->actual_length = 0;
1427 
1428 	i = 0;
1429 	head = &urbp->td_list;
1430 	tmp = head->next;
1431 	while (tmp != head) {
1432 		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1433 		int actlength;
1434 
1435 		tmp = tmp->next;
1436 
1437 		if (td->status & TD_CTRL_ACTIVE)
1438 			return -EINPROGRESS;
1439 
1440 		actlength = uhci_actual_length(td->status);
1441 		urb->iso_frame_desc[i].actual_length = actlength;
1442 		urb->actual_length += actlength;
1443 
1444 		status = uhci_map_status(uhci_status_bits(td->status), usb_pipeout(urb->pipe));
1445 		urb->iso_frame_desc[i].status = status;
1446 		if (status) {
1447 			urb->error_count++;
1448 			ret = status;
1449 		}
1450 
1451 		i++;
1452 	}
1453 
1454 	return ret;
1455 }
1456 
1457 /*
1458  * MUST be called with uhci->urb_list_lock acquired
1459  */
uhci_find_urb_ep(struct uhci * uhci,struct urb * urb)1460 static struct urb *uhci_find_urb_ep(struct uhci *uhci, struct urb *urb)
1461 {
1462 	struct list_head *tmp, *head;
1463 
1464 	/* We don't match Isoc transfers since they are special */
1465 	if (usb_pipeisoc(urb->pipe))
1466 		return NULL;
1467 
1468 	head = &uhci->urb_list;
1469 	tmp = head->next;
1470 	while (tmp != head) {
1471 		struct urb *u = list_entry(tmp, struct urb, urb_list);
1472 
1473 		tmp = tmp->next;
1474 
1475 		if (u->dev == urb->dev && u->pipe == urb->pipe &&
1476 		    u->status == -EINPROGRESS)
1477 			return u;
1478 	}
1479 
1480 	return NULL;
1481 }
1482 
/*
 * Main submission entry point (exposed via uhci_device_operations).
 *
 * Validates the URB, allocates its private state, dispatches to the
 * per-pipe-type submit routine (handling bandwidth accounting for
 * periodic transfers and short-circuiting the virtual root hub), and
 * links the URB onto uhci->urb_list when it goes in flight.
 *
 * Returns 0 once the URB is queued (-EINPROGRESS internally), or a
 * negative error code.  On synchronous success (ret == 0 from a submit
 * routine) the completion is invoked directly.
 */
static int uhci_submit_urb(struct urb *urb)
{
	int ret = -EINVAL;
	struct uhci *uhci;
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	if (!urb)
		return -EINVAL;

	if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
		warn("uhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
		return -ENODEV;
	}

	uhci = (struct uhci *)urb->dev->bus->hcpriv;

	/* Pin the device for the lifetime of the URB */
	usb_inc_dev_use(urb->dev);

	/* Lock order: list lock first, then the URB's own lock */
	spin_lock_irqsave(&uhci->urb_list_lock, flags);
	spin_lock(&urb->lock);

	if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
	    urb->status == -ECONNABORTED) {
		dbg("uhci_submit_urb: urb not available to submit (status = %d)", urb->status);
		/* Since we can have problems on the out path */
		spin_unlock(&urb->lock);
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
		usb_dec_dev_use(urb->dev);

		return ret;
	}

	INIT_LIST_HEAD(&urb->urb_list);
	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;

		goto out;
	}

	/* Only one URB per endpoint unless bulk queueing was requested */
	eurb = uhci_find_urb_ep(uhci, urb);
	if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
		ret = -ENXIO;

		goto out;
	}

	/* Short circuit the virtual root hub */
	if (urb->dev == uhci->rh.dev) {
		ret = rh_submit_urb(urb);

		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(urb);
		break;
	case PIPE_INTERRUPT:
		if (urb->bandwidth == 0) {	/* not yet checked/allocated */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(urb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else		/* bandwidth is already set */
			ret = uhci_submit_interrupt(urb);
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		if (urb->bandwidth == 0) {	/* not yet checked/allocated */
			if (urb->number_of_packets <= 0) {
				ret = -EINVAL;
				break;
			}
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0) {
				ret = bustime;
				break;
			}

			ret = uhci_submit_isochronous(urb);
			if (ret == -EINPROGRESS)
				usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		} else		/* bandwidth is already set */
			ret = uhci_submit_isochronous(urb);
		break;
	}

out:
	urb->status = ret;

	if (ret == -EINPROGRESS) {
		/* We use _tail to make find_urb_ep more efficient */
		list_add_tail(&urb->urb_list, &uhci->urb_list);

		spin_unlock(&urb->lock);
		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

		return 0;
	}

	/* Submission failed or completed synchronously: undo scheduling */
	uhci_unlink_generic(uhci, urb);

	spin_unlock(&urb->lock);
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	/* Only call completion if it was successful */
	if (!ret)
		uhci_call_completion(urb);

	return ret;
}
1602 
1603 /*
1604  * Return the result of a transfer
1605  *
1606  * MUST be called with urb_list_lock acquired
1607  */
/*
 * Return the result of a transfer
 *
 * Polls the per-pipe-type result routine for @urb; when the transfer is
 * no longer in progress, releases bandwidth, unlinks the hardware
 * descriptors and queues the URB for completion.  Periodic interrupt
 * URBs (interval != 0) stay on the schedule and are only completed.
 *
 * MUST be called with urb_list_lock acquired
 */
static void uhci_transfer_result(struct uhci *uhci, struct urb *urb)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct urb_priv *urbp;

	/* The root hub is special */
	if (urb->dev == uhci->rh.dev)
		return;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS) {
		info("uhci_transfer_result: called for URB %p not in flight?", urb);
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(urb);
		break;
	case PIPE_INTERRUPT:
		ret = uhci_result_interrupt(urb);
		break;
	case PIPE_BULK:
		ret = uhci_result_bulk(urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(urb);
		break;
	}

	urbp->status = ret;

	/* Still running: nothing more to do this pass */
	if (ret == -EINPROGRESS)
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Spinlock needed ? */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Interrupts are an exception */
		if (urb->interval)
			goto out_complete;

		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Spinlock needed ? */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
			usb_pipetype(urb->pipe), urb);
	}

	/* Remove it from uhci->urb_list */
	list_del_init(&urb->urb_list);

out_complete:
	uhci_add_complete(urb);

out:
	spin_unlock_irqrestore(&urb->lock, flags);
}
1682 
1683 /*
1684  * MUST be called with urb->lock acquired
1685  */
/*
 * Detach @urb's descriptors from the hardware schedule and recover the
 * endpoint's data toggle from the TD chain so the next transfer starts
 * with the right value.  Tolerates a missing urb_priv (allocation may
 * have failed before this point).
 *
 * MUST be called with urb->lock acquired
 */
static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	int prevactive = 1;

	/* We can get called when urbp allocation fails, so check */
	if (!urbp)
		return;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There's 3 way's the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		/* Cases 1 and 2: this TD completed, so the NEXT toggle is
		 * this TD's toggle flipped */
		if (!(td->status & TD_CTRL_ACTIVE) &&
		    (uhci_actual_length(td->status) < uhci_expected_length(td->info) ||
		    tmp == head))
			usb_settoggle(urb->dev, uhci_endpoint(td->info),
				uhci_packetout(td->info),
				uhci_toggle(td->info) ^ 1);
		/* Case 3: this TD never ran, so its own toggle is next */
		else if ((td->status & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td->info),
				uhci_packetout(td->info),
				uhci_toggle(td->info));

		prevactive = td->status & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
1738 
uhci_unlink_urb(struct urb * urb)1739 static int uhci_unlink_urb(struct urb *urb)
1740 {
1741 	struct uhci *uhci;
1742 	unsigned long flags;
1743 	struct urb_priv *urbp = urb->hcpriv;
1744 
1745 	if (!urb)
1746 		return -EINVAL;
1747 
1748 	if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
1749 		return -ENODEV;
1750 
1751 	uhci = (struct uhci *)urb->dev->bus->hcpriv;
1752 
1753 	spin_lock_irqsave(&uhci->urb_list_lock, flags);
1754 	spin_lock(&urb->lock);
1755 
1756 	/* Release bandwidth for Interrupt or Isoc. transfers */
1757 	/* Spinlock needed ? */
1758 	if (urb->bandwidth) {
1759 		switch (usb_pipetype(urb->pipe)) {
1760 		case PIPE_INTERRUPT:
1761 			usb_release_bandwidth(urb->dev, urb, 0);
1762 			break;
1763 		case PIPE_ISOCHRONOUS:
1764 			usb_release_bandwidth(urb->dev, urb, 1);
1765 			break;
1766 		default:
1767 			break;
1768 		}
1769 	}
1770 
1771 	if (urb->status != -EINPROGRESS) {
1772 		spin_unlock(&urb->lock);
1773 		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1774 		return 0;
1775 	}
1776 
1777 	list_del_init(&urb->urb_list);
1778 
1779 	uhci_unlink_generic(uhci, urb);
1780 
1781 	/* Short circuit the virtual root hub */
1782 	if (urb->dev == uhci->rh.dev) {
1783 		rh_unlink_urb(urb);
1784 
1785 		spin_unlock(&urb->lock);
1786 		spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1787 
1788 		uhci_call_completion(urb);
1789 	} else {
1790 		if (urb->transfer_flags & USB_ASYNC_UNLINK) {
1791 			urbp->status = urb->status = -ECONNABORTED;
1792 
1793 			spin_lock(&uhci->urb_remove_list_lock);
1794 
1795 			/* If we're the first, set the next interrupt bit */
1796 			if (list_empty(&uhci->urb_remove_list))
1797 				uhci_set_next_interrupt(uhci);
1798 
1799 			list_add(&urb->urb_list, &uhci->urb_remove_list);
1800 
1801 			spin_unlock(&uhci->urb_remove_list_lock);
1802 
1803 			spin_unlock(&urb->lock);
1804 			spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1805 
1806 		} else {
1807 			urb->status = -ENOENT;
1808 
1809 			spin_unlock(&urb->lock);
1810 			spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
1811 
1812 			if (in_interrupt()) {	/* wait at least 1 frame */
1813 				static int errorcount = 10;
1814 
1815 				if (errorcount--)
1816 					dbg("uhci_unlink_urb called from interrupt for urb %p", urb);
1817 				udelay(1000);
1818 			} else
1819 				schedule_timeout(1+1*HZ/1000);
1820 
1821 			uhci_call_completion(urb);
1822 		}
1823 	}
1824 
1825 	return 0;
1826 }
1827 
uhci_fsbr_timeout(struct uhci * uhci,struct urb * urb)1828 static int uhci_fsbr_timeout(struct uhci *uhci, struct urb *urb)
1829 {
1830 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1831 	struct list_head *head, *tmp;
1832 	int count = 0;
1833 
1834 	uhci_dec_fsbr(uhci, urb);
1835 
1836 	urbp->fsbr_timeout = 1;
1837 
1838 	/*
1839 	 * Ideally we would want to fix qh->element as well, but it's
1840 	 * read/write by the HC, so that can introduce a race. It's not
1841 	 * really worth the hassle
1842 	 */
1843 
1844 	head = &urbp->td_list;
1845 	tmp = head->next;
1846 	while (tmp != head) {
1847 		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1848 
1849 		tmp = tmp->next;
1850 
1851 		/*
1852 		 * Make sure we don't do the last one (since it'll have the
1853 		 * TERM bit set) as well as we skip every so many TD's to
1854 		 * make sure it doesn't hog the bandwidth
1855 		 */
1856 		if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1857 			td->link |= UHCI_PTR_DEPTH;
1858 
1859 		count++;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 /*
1866  * uhci_get_current_frame_number()
1867  *
1868  * returns the current frame number for a USB bus/controller.
1869  */
uhci_get_current_frame_number(struct usb_device * dev)1870 static int uhci_get_current_frame_number(struct usb_device *dev)
1871 {
1872 	struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
1873 
1874 	return inw(uhci->io_addr + USBFRNUM);
1875 }
1876 
/*
 * Entry points handed to the USB core for this host controller.
 * Positional initializer: the order must match struct usb_operations
 * (declared in the USB core headers).
 */
struct usb_operations uhci_device_operations = {
	uhci_alloc_dev,
	uhci_free_dev,
	uhci_get_current_frame_number,
	uhci_submit_urb,
	uhci_unlink_urb
};
1884 
1885 /* Virtual Root Hub */
1886 
/* USB device descriptor returned for the emulated (virtual) root hub */
static __u8 root_hub_dev_des[] =
{
 	0x12,			/*  __u8  bLength; */
	0x01,			/*  __u8  bDescriptorType; Device */
	0x00,			/*  __u16 bcdUSB; v1.0 */
	0x01,
	0x09,			/*  __u8  bDeviceClass; HUB_CLASSCODE */
	0x00,			/*  __u8  bDeviceSubClass; */
	0x00,			/*  __u8  bDeviceProtocol; */
	0x08,			/*  __u8  bMaxPacketSize0; 8 Bytes */
	0x00,			/*  __u16 idVendor; */
	0x00,
	0x00,			/*  __u16 idProduct; */
	0x00,
	0x00,			/*  __u16 bcdDevice; */
	0x00,
	0x00,			/*  __u8  iManufacturer; */
	0x02,			/*  __u8  iProduct; */
	0x01,			/*  __u8  iSerialNumber; */
	0x01			/*  __u8  bNumConfigurations; */
};
1908 
1909 
1910 /* Configuration descriptor */
/* Configuration descriptor */
/* Config + interface + interrupt-endpoint descriptors for the virtual
 * root hub, concatenated as one 0x19-byte blob (wTotalLength below) */
static __u8 root_hub_config_des[] =
{
	0x09,			/*  __u8  bLength; */
	0x02,			/*  __u8  bDescriptorType; Configuration */
	0x19,			/*  __u16 wTotalLength; */
	0x00,
	0x01,			/*  __u8  bNumInterfaces; */
	0x01,			/*  __u8  bConfigurationValue; */
	0x00,			/*  __u8  iConfiguration; */
	0x40,			/*  __u8  bmAttributes;
					Bit 7: Bus-powered, 6: Self-powered,
					Bit 5 Remote-wakeup, 4..0: resvd */
	0x00,			/*  __u8  MaxPower; */

	/* interface */
	0x09,			/*  __u8  if_bLength; */
	0x04,			/*  __u8  if_bDescriptorType; Interface */
	0x00,			/*  __u8  if_bInterfaceNumber; */
	0x00,			/*  __u8  if_bAlternateSetting; */
	0x01,			/*  __u8  if_bNumEndpoints; */
	0x09,			/*  __u8  if_bInterfaceClass; HUB_CLASSCODE */
	0x00,			/*  __u8  if_bInterfaceSubClass; */
	0x00,			/*  __u8  if_bInterfaceProtocol; */
	0x00,			/*  __u8  if_iInterface; */

	/* endpoint */
	0x07,			/*  __u8  ep_bLength; */
	0x05,			/*  __u8  ep_bDescriptorType; Endpoint */
	0x81,			/*  __u8  ep_bEndpointAddress; IN Endpoint 1 */
	0x03,			/*  __u8  ep_bmAttributes; Interrupt */
	0x08,			/*  __u16 ep_wMaxPacketSize; 8 Bytes */
	0x00,
	0xff			/*  __u8  ep_bInterval; 255 ms */
};
1945 
/* Class-specific hub descriptor for the virtual root hub (2 ports) */
static __u8 root_hub_hub_des[] =
{
	0x09,			/*  __u8  bLength; */
	0x29,			/*  __u8  bDescriptorType; Hub-descriptor */
	0x02,			/*  __u8  bNbrPorts; */
	0x00,			/* __u16  wHubCharacteristics; */
	0x00,
	0x01,			/*  __u8  bPwrOn2pwrGood; 2ms */
	0x00,			/*  __u8  bHubContrCurrent; 0 mA */
	0x00,			/*  __u8  DeviceRemovable; *** 7 Ports max *** */
	0xff			/*  __u8  PortPwrCtrlMask; *** 7 ports max *** */
};
1958 
1959 /* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
/*
 * Build the hub status-change bitmap by polling the port status/control
 * registers: bit (i+1) of the report is set when port i has a connect
 * or enable change (mask 0xa = USBPORTSC_CSC|USBPORTSC_PEC).  If any
 * change is pending and a listener is registered, the root-hub
 * interrupt URB is completed.  Always returns 0.
 */
static int rh_send_irq(struct urb *urb)
{
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	unsigned int io_addr = uhci->io_addr;
	unsigned long flags;
	int i, len = 1;
	__u16 data = 0;

	spin_lock_irqsave(&urb->lock, flags);
	for (i = 0; i < uhci->rh.numports; i++) {
		/* Port registers are 16 bits wide, hence the i * 2 stride */
		data |= ((inw(io_addr + USBPORTSC1 + i * 2) & 0xa) > 0 ? (1 << (i + 1)) : 0);
		/* One byte covers 8 ports (plus the hub's own bit 0) */
		len = (i + 1) / 8 + 1;
	}

	*(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
	urb->actual_length = len;
	urbp->status = 0;

	spin_unlock_irqrestore(&urb->lock, flags);

	if ((data > 0) && (uhci->rh.send != 0)) {
		dbg("root-hub INT complete: port1: %x port2: %x data: %x",
			inw(io_addr + USBPORTSC1), inw(io_addr + USBPORTSC2), data);
		uhci_call_completion(urb);
	}

	return 0;
}
1989 
1990 /* Virtual Root Hub INTs are polled by this timer every "interval" ms */
1991 static int rh_init_int_timer(struct urb *urb);
1992 
/*
 * Periodic root-hub timer callback.  Besides polling the root-hub
 * interrupt endpoint it doubles as the driver's housekeeping tick:
 * FSBR timeouts, URB timeouts (unlinked outside the list lock via a
 * private list), final FSBR disable, and global suspend when no ports
 * are active.  Re-arms itself at the end.
 */
static void rh_int_timer_do(unsigned long ptr)
{
	struct urb *urb = (struct urb *)ptr;
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	struct list_head list, *tmp, *head;
	unsigned long flags;

	if (uhci->rh.send)
		rh_send_irq(urb);

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&uhci->urb_list_lock, flags);
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb *u = list_entry(tmp, struct urb, urb_list);
		struct urb_priv *up = (struct urb_priv *)u->hcpriv;

		tmp = tmp->next;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		/* Check if the URB timed out */
		if (u->timeout && time_after_eq(jiffies, up->inserttime + u->timeout)) {
			/* Move to the private list; unlinking must happen
			 * after urb_list_lock is dropped */
			list_del(&u->urb_list);
			list_add_tail(&u->urb_list, &list);
		}

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);

	/* Now unlink the timed-out URBs, lock-free with respect to the
	 * main list */
	head = &list;
	tmp = head->next;
	while (tmp != head) {
		struct urb *u = list_entry(tmp, struct urb, urb_list);

		tmp = tmp->next;

		u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
		uhci_unlink_urb(u);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* enter global suspend if nothing connected */
	if (!uhci->is_suspended && !ports_active(uhci))
		suspend_hc(uhci);

	rh_init_int_timer(urb);
}
2053 
2054 /* Root Hub INTs are polled by this timer */
/* Root Hub INTs are polled by this timer */
/*
 * (Re)arm the root-hub polling timer for @urb, clamping the period to a
 * minimum of 30ms.  Always returns 0.
 */
static int rh_init_int_timer(struct urb *urb)
{
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;

	uhci->rh.interval = urb->interval;
	init_timer(&uhci->rh.rh_int_timer);
	uhci->rh.rh_int_timer.function = rh_int_timer_do;
	uhci->rh.rh_int_timer.data = (unsigned long)urb;
	/* interval is in ms; convert to jiffies, minimum 30ms */
	uhci->rh.rh_int_timer.expires = jiffies + (HZ * (urb->interval < 30 ? 30 : urb->interval)) / 1000;
	add_timer(&uhci->rh.rh_int_timer);

	return 0;
}
2068 
/* Shorthand used inside rh_submit_urb()'s big switch: record the reply
 * length and leave the switch */
#define OK(x)			len = (x); break

/*
 * Read-modify-write helpers for the root-hub port status/control
 * registers.  Both expect `io_addr`, `status` and `wIndex` (1-based
 * port number) in the caller's scope.  The 0xfff5 mask presumably
 * avoids writing the write-to-clear change bits back by accident --
 * TODO confirm against the USBPORTSC bit layout.
 */
#define CLR_RH_PORTSTAT(x) \
	status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
	status = (status & 0xfff5) & ~(x); \
	outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))

#define SET_RH_PORTSTAT(x) \
	status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
	status = (status & 0xfff5) | (x); \
	outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
2080 
2081 
/* Root Hub Control Pipe */
/*
 * Emulate the root hub in software: decode hub control requests and
 * implement them by reading/writing the controller's port status
 * registers (USBPORTSC1/2).
 *
 * Interrupt URBs addressed to the root hub are not completed here --
 * they are parked in uhci->rh and serviced by the rh_int_timer poll,
 * so -EINPROGRESS is returned for them.
 *
 * Returns 0 on success or -EPIPE for unsupported requests; the number
 * of reply bytes is stored in urb->actual_length.
 */
static int rh_submit_urb(struct urb *urb)
{
	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
	unsigned int pipe = urb->pipe;
	struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *)urb->setup_packet;
	void *data = urb->transfer_buffer;
	int leni = urb->transfer_buffer_length;	/* caller's buffer size */
	int len = 0;				/* reply length, set via OK() */
	int status = 0;				/* scratch for *_RH_PORTSTAT macros */
	int stat = 0;				/* return code */
	int i;
	unsigned int io_addr = uhci->io_addr;
	__u16 cstatus;
	__u16 bmRType_bReq;
	__u16 wValue;
	__u16 wIndex;
	__u16 wLength;

	/* Status-change polling is timer driven, not completed inline */
	if (usb_pipetype(pipe) == PIPE_INTERRUPT) {
		uhci->rh.urb = urb;
		uhci->rh.send = 1;
		uhci->rh.interval = urb->interval;
		rh_init_int_timer(urb);

		return -EINPROGRESS;
	}

	/* Pack request type and request code into one switch key */
	bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
	wValue = le16_to_cpu(cmd->wValue);
	wIndex = le16_to_cpu(cmd->wIndex);
	wLength = le16_to_cpu(cmd->wLength);

	/* Drop any stale per-port reset-change flags */
	for (i = 0; i < 8; i++)
		uhci->rh.c_p_r[i] = 0;

	switch (bmRType_bReq) {
		/* Request Destination:
		   without flags: Device,
		   RH_INTERFACE: interface,
		   RH_ENDPOINT: endpoint,
		   RH_CLASS means HUB here,
		   RH_OTHER | RH_CLASS  almost ever means HUB_PORT here
		*/

	case RH_GET_STATUS:
		*(__u16 *)data = cpu_to_le16(1);
		OK(2);
	case RH_GET_STATUS | RH_INTERFACE:
		*(__u16 *)data = cpu_to_le16(0);
		OK(2);
	case RH_GET_STATUS | RH_ENDPOINT:
		*(__u16 *)data = cpu_to_le16(0);
		OK(2);
	case RH_GET_STATUS | RH_CLASS:
		*(__u32 *)data = cpu_to_le32(0);
		OK(4);		/* hub power */
	case RH_GET_STATUS | RH_OTHER | RH_CLASS:
		/* Repack the UHCI PORTSC bits into the USB hub-spec
		 * wPortStatus / wPortChange layout */
		status = inw(io_addr + USBPORTSC1 + 2 * (wIndex - 1));
		cstatus = ((status & USBPORTSC_CSC) >> (1 - 0)) |
			((status & USBPORTSC_PEC) >> (3 - 1)) |
			(uhci->rh.c_p_r[wIndex - 1] << (0 + 4));
			status = (status & USBPORTSC_CCS) |
			((status & USBPORTSC_PE) >> (2 - 1)) |
			((status & USBPORTSC_SUSP) >> (12 - 2)) |
			((status & USBPORTSC_PR) >> (9 - 4)) |
			(1 << 8) |      /* power on */
			((status & USBPORTSC_LSDA) << (-8 + 9));

		*(__u16 *)data = cpu_to_le16(status);
		*(__u16 *)(data + 2) = cpu_to_le16(cstatus);
		OK(4);
	case RH_CLEAR_FEATURE | RH_ENDPOINT:
		switch (wValue) {
		case RH_ENDPOINT_STALL:
			OK(0);
		}
		break;
	case RH_CLEAR_FEATURE | RH_CLASS:
		switch (wValue) {
		case RH_C_HUB_OVER_CURRENT:
			OK(0);	/* hub power over current */
		}
		break;
	case RH_CLEAR_FEATURE | RH_OTHER | RH_CLASS:
		switch (wValue) {
		case RH_PORT_ENABLE:
			CLR_RH_PORTSTAT(USBPORTSC_PE);
			OK(0);
		case RH_PORT_SUSPEND:
			CLR_RH_PORTSTAT(USBPORTSC_SUSP);
			OK(0);
		case RH_PORT_POWER:
			OK(0);	/* port power */
		case RH_C_PORT_CONNECTION:
			/* CSC is write-to-clear, so SET writes a 1 to ack it */
			SET_RH_PORTSTAT(USBPORTSC_CSC);
			OK(0);
		case RH_C_PORT_ENABLE:
			/* PEC is write-to-clear, same idea */
			SET_RH_PORTSTAT(USBPORTSC_PEC);
			OK(0);
		case RH_C_PORT_SUSPEND:
			/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
			OK(0);
		case RH_C_PORT_OVER_CURRENT:
			OK(0);	/* port power over current */
		case RH_C_PORT_RESET:
			/* reset-change state is tracked in software only */
			uhci->rh.c_p_r[wIndex - 1] = 0;
			OK(0);
		}
		break;
	case RH_SET_FEATURE | RH_OTHER | RH_CLASS:
		switch (wValue) {
		case RH_PORT_SUSPEND:
			SET_RH_PORTSTAT(USBPORTSC_SUSP);
			OK(0);
		case RH_PORT_RESET:
			/* Drive the port reset sequence by hand */
			SET_RH_PORTSTAT(USBPORTSC_PR);
			mdelay(50);	/* USB v1.1 7.1.7.3 */
			uhci->rh.c_p_r[wIndex - 1] = 1;
			CLR_RH_PORTSTAT(USBPORTSC_PR);
			udelay(10);
			SET_RH_PORTSTAT(USBPORTSC_PE);
			mdelay(10);
			/* ack CSC + PEC left over from the reset */
			SET_RH_PORTSTAT(0xa);
			OK(0);
		case RH_PORT_POWER:
			OK(0); /* port power ** */
		case RH_PORT_ENABLE:
			SET_RH_PORTSTAT(USBPORTSC_PE);
			OK(0);
		}
		break;
	case RH_SET_ADDRESS:
		uhci->rh.devnum = wValue;
		OK(0);
	case RH_GET_DESCRIPTOR:
		switch ((wValue & 0xff00) >> 8) {
		case 0x01:	/* device descriptor */
			len = min_t(unsigned int, leni,
				  min_t(unsigned int,
				      sizeof(root_hub_dev_des), wLength));
			memcpy(data, root_hub_dev_des, len);
			OK(len);
		case 0x02:	/* configuration descriptor */
			len = min_t(unsigned int, leni,
				  min_t(unsigned int,
				      sizeof(root_hub_config_des), wLength));
			memcpy (data, root_hub_config_des, len);
			OK(len);
		case 0x03:	/* string descriptors */
			len = usb_root_hub_string (wValue & 0xff,
				uhci->io_addr, "UHCI-alt",
				data, wLength);
			if (len > 0) {
				OK(min_t(int, leni, len));
			} else
				stat = -EPIPE;
		}
		break;
	case RH_GET_DESCRIPTOR | RH_CLASS:
		/* patch the real port count into the canned hub descriptor */
		root_hub_hub_des[2] = uhci->rh.numports;
		len = min_t(unsigned int, leni,
			  min_t(unsigned int, sizeof(root_hub_hub_des), wLength));
		memcpy(data, root_hub_hub_des, len);
		OK(len);
	case RH_GET_CONFIGURATION:
		*(__u8 *)data = 0x01;
		OK(1);
	case RH_SET_CONFIGURATION:
		OK(0);
	case RH_GET_INTERFACE | RH_INTERFACE:
		*(__u8 *)data = 0x00;
		OK(1);
	case RH_SET_INTERFACE | RH_INTERFACE:
		OK(0);
	default:
		stat = -EPIPE;
	}

	urb->actual_length = len;

	return stat;
}
2265 
2266 /*
2267  * MUST be called with urb->lock acquired
2268  */
rh_unlink_urb(struct urb * urb)2269 static int rh_unlink_urb(struct urb *urb)
2270 {
2271 	struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
2272 
2273 	if (uhci->rh.urb == urb) {
2274 		urb->status = -ENOENT;
2275 		uhci->rh.send = 0;
2276 		uhci->rh.urb = NULL;
2277 		del_timer(&uhci->rh.rh_int_timer);
2278 	}
2279 	return 0;
2280 }
2281 
uhci_free_pending_qhs(struct uhci * uhci)2282 static void uhci_free_pending_qhs(struct uhci *uhci)
2283 {
2284 	struct list_head *tmp, *head;
2285 	unsigned long flags;
2286 
2287 	spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
2288 	head = &uhci->qh_remove_list;
2289 	tmp = head->next;
2290 	while (tmp != head) {
2291 		struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
2292 
2293 		tmp = tmp->next;
2294 
2295 		list_del_init(&qh->remove_list);
2296 
2297 		uhci_free_qh(uhci, qh);
2298 	}
2299 	spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
2300 }
2301 
/*
 * Finish one URB: sync its DMA buffers back to the CPU, tear down the
 * driver-private state, invoke the completion handler, then either
 * resubmit (interrupt URBs and URB rings) or drop the device reference.
 *
 * Takes urb->lock itself; must not be called with a uhci list lock held
 * (the completion handler may resubmit and re-enter the driver).
 */
static void uhci_call_completion(struct urb *urb)
{
	struct urb_priv *urbp;
	struct usb_device *dev = urb->dev;
	struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
	int is_ring = 0, killed, resubmit_interrupt, status;
	struct urb *nurb;
	unsigned long flags;

	spin_lock_irqsave(&urb->lock, flags);

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp || !urb->dev) {
		spin_unlock_irqrestore(&urb->lock, flags);
		return;
	}

	/* "killed" = the URB was unlinked/cancelled rather than completed */
	killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
			urb->status == -ECONNRESET);
	resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
			urb->interval);

	nurb = urb->next;
	if (nurb && !killed) {
		int count = 0;

		/* Walk the urb->next chain; if any linked URB was killed,
		 * the whole chain counts as killed.  MAX_URB_LOOP guards
		 * against malformed (non-ring) cycles. */
		while (nurb && nurb != urb && count < MAX_URB_LOOP) {
			if (nurb->status == -ENOENT ||
			    nurb->status == -ECONNABORTED ||
			    nurb->status == -ECONNRESET) {
				killed = 1;
				break;
			}

			nurb = nurb->next;
			count++;
		}

		if (count == MAX_URB_LOOP)
			err("uhci_call_completion: too many linked URB's, loop? (first loop)");

		/* Check to see if chain is a ring */
		is_ring = (nurb == urb);
	}

	/* Make DMA'd data visible to the CPU before the handler sees it */
	if (urbp->transfer_buffer_dma_handle)
		pci_dma_sync_single(uhci->dev, urbp->transfer_buffer_dma_handle,
			urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
			PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);

	if (urbp->setup_packet_dma_handle)
		pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
			sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);

	/* Snapshot the result before urb_priv may be destroyed */
	status = urbp->status;
	if (!resubmit_interrupt || killed)
		/* We don't need urb_priv anymore */
		uhci_destroy_urb_priv(urb);

	if (!killed)
		urb->status = status;

	urb->dev = NULL;
	spin_unlock_irqrestore(&urb->lock, flags);

	/* Handler runs with no locks held */
	if (urb->complete)
		urb->complete(urb);

	if (resubmit_interrupt)
		/* Recheck the status. The completion handler may have */
		/*  unlinked the resubmitting interrupt URB */
		killed = (urb->status == -ENOENT ||
			  urb->status == -ECONNABORTED ||
			  urb->status == -ECONNRESET);

	if (resubmit_interrupt && !killed) {
		urb->dev = dev;
		uhci_reset_interrupt(urb);
	} else {
		if (is_ring && !killed) {
			urb->dev = dev;
			uhci_submit_urb(urb);
		} else {
			/* We decrement the usage count after we're done */
			/*  with everything */
			usb_dec_dev_use(dev);
		}
	}
}
2391 
/*
 * Run uhci_call_completion() for every URB on the complete list.
 *
 * The list lock is dropped around each completion call (handlers may
 * resubmit and re-enter the driver), so the list head and the next
 * pointer are deliberately re-read after every iteration.
 */
static void uhci_finish_completion(struct uhci *uhci)
{
	struct list_head *tmp, *head;
	unsigned long flags;

	spin_lock_irqsave(&uhci->complete_list_lock, flags);
	head = &uhci->complete_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
		struct urb *urb = urbp->urb;

		/* Detach before unlocking so no one else completes it */
		list_del_init(&urbp->complete_list);
		spin_unlock_irqrestore(&uhci->complete_list_lock, flags);

		uhci_call_completion(urb);

		spin_lock_irqsave(&uhci->complete_list_lock, flags);
		head = &uhci->complete_list;
		tmp = head->next;
	}
	spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
2415 
uhci_remove_pending_qhs(struct uhci * uhci)2416 static void uhci_remove_pending_qhs(struct uhci *uhci)
2417 {
2418 	struct list_head *tmp, *head;
2419 	unsigned long flags;
2420 
2421 	spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
2422 	head = &uhci->urb_remove_list;
2423 	tmp = head->next;
2424 	while (tmp != head) {
2425 		struct urb *urb = list_entry(tmp, struct urb, urb_list);
2426 		struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
2427 
2428 		tmp = tmp->next;
2429 
2430 		list_del_init(&urb->urb_list);
2431 
2432 		urbp->status = urb->status = -ECONNRESET;
2433 
2434 		uhci_add_complete(urb);
2435 	}
2436 	spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
2437 }
2438 
/*
 * Hardware interrupt handler (the IRQ line may be shared).
 *
 * Acknowledges controller status, logs fatal conditions, handles remote
 * wakeup, reaps pending QH/URB removals, then scans the active URB list
 * for progress and runs completions.
 */
static void uhci_interrupt(int irq, void *__uhci, struct pt_regs *regs)
{
	struct uhci *uhci = __uhci;
	unsigned int io_addr = uhci->io_addr;
	unsigned short status;
	struct list_head *tmp, *head;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause
	 */
	status = inw(io_addr + USBSTS);
	if (!status)	/* shared interrupt, not mine */
		return;
	outw(status, io_addr + USBSTS);		/* Clear it */

	/* Anything beyond the routine completion/error/resume bits is bad */
	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			err("%x: host system error, PCI problems?", io_addr);
		if (status & USBSTS_HCPE)
			err("%x: host controller process error. something bad happened", io_addr);
		if ((status & USBSTS_HCH) && !uhci->is_suspended) {
			err("%x: host controller halted. very bad", io_addr);
			/* FIXME: Reset the controller, fix the offending TD */
		}
	}

	/* Resume Detect: a device woke us out of global suspend */
	if (status & USBSTS_RD)
		wakeup_hc(uhci);

	uhci_free_pending_qhs(uhci);

	uhci_remove_pending_qhs(uhci);

	uhci_clear_next_interrupt(uhci);

	/* Walk the list of pending URB's to see which ones completed */
	spin_lock(&uhci->urb_list_lock);
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb *urb = list_entry(tmp, struct urb, urb_list);

		tmp = tmp->next;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	spin_unlock(&uhci->urb_list_lock);

	/* Completion handlers run outside urb_list_lock */
	uhci_finish_completion(uhci);
}
2491 
/*
 * Globally reset the controller and leave it stopped: assert GRESET for
 * 50ms, clear the command register, then allow 10ms to settle.  Call
 * start_hc() afterwards to run the schedule again.
 */
static void reset_hc(struct uhci *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	/* Global reset for 50ms */
	outw(USBCMD_GRESET, io_addr + USBCMD);
	wait_ms(50);
	outw(0, io_addr + USBCMD);
	wait_ms(10);
}
2502 
/*
 * Put the controller into global suspend.  Used when no ports are
 * active; resume is signalled via USBSTS_RD and handled by wakeup_hc().
 */
static void suspend_hc(struct uhci *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	dbg("%x: suspend_hc", io_addr);

	/* Writing only EGSM also drops Run/Stop, halting the schedule */
	outw(USBCMD_EGSM, io_addr + USBCMD);

	uhci->is_suspended = 1;
}
2513 
wakeup_hc(struct uhci * uhci)2514 static void wakeup_hc(struct uhci *uhci)
2515 {
2516 	unsigned int io_addr = uhci->io_addr;
2517 	unsigned int status;
2518 
2519 	dbg("%x: wakeup_hc", io_addr);
2520 
2521 	outw(0, io_addr + USBCMD);
2522 
2523 	/* wait for EOP to be sent */
2524 	status = inw(io_addr + USBCMD);
2525 	while (status & USBCMD_FGR)
2526 		status = inw(io_addr + USBCMD);
2527 
2528 	uhci->is_suspended = 0;
2529 
2530 	/* Run and mark it configured with a 64-byte max packet */
2531 	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2532 }
2533 
ports_active(struct uhci * uhci)2534 static int ports_active(struct uhci *uhci)
2535 {
2536 	unsigned int io_addr = uhci->io_addr;
2537 	int connection = 0;
2538 	int i;
2539 
2540 	for (i = 0; i < uhci->rh.numports; i++)
2541 		connection |= (inw(io_addr + USBPORTSC1 + i * 2) & 0x1);
2542 
2543 	return connection;
2544 }
2545 
/*
 * Bring the controller out of reset and start executing the schedule:
 * HC reset (bounded wait), enable interrupts, program the frame number
 * and frame list base, then set Run/Stop.
 */
static void start_hc(struct uhci *uhci)
{
	unsigned int io_addr = uhci->io_addr;
	int timeout = 1000;

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		/* the bit self-clears when reset completes; don't spin forever */
		if (!--timeout) {
			printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
			break;
		}
	}

	/* Turn on all interrupts */
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
}
2576 
#ifdef CONFIG_PROC_FS
/* Sequence number used to name the per-controller proc entries ("hc%d") */
static int uhci_num = 0;
#endif
2580 
/* Free the per-controller structure itself; shared by the normal
 * teardown path (release_uhci) and the early error paths in alloc_uhci. */
static void free_uhci(struct uhci *uhci)
{
	kfree(uhci);
}
2585 
/*
 * De-allocate all resources..
 *
 * Tears down, in reverse order of setup: the IRQ, the skeleton QHs and
 * TDs, the DMA pools (which must be empty first), the frame list, the
 * bus structure and (with CONFIG_PROC_FS) the proc entry, then the
 * uhci structure itself.  Every field is NULLed/reset as it is freed.
 */
static void release_uhci(struct uhci *uhci)
{
	int i;
#ifdef CONFIG_PROC_FS
	char buf[8];
#endif

	if (uhci->irq >= 0) {
		free_irq(uhci->irq, uhci);
		uhci->irq = -1;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	for (i = 0; i < UHCI_NUM_SKELTD; i++)
		if (uhci->skeltd[i]) {
			uhci_free_td(uhci, uhci->skeltd[i]);
			uhci->skeltd[i] = NULL;
		}

	/* pools are destroyed only after their QHs/TDs were returned above */
	if (uhci->qh_pool) {
		pci_pool_destroy(uhci->qh_pool);
		uhci->qh_pool = NULL;
	}

	if (uhci->td_pool) {
		pci_pool_destroy(uhci->td_pool);
		uhci->td_pool = NULL;
	}

	if (uhci->fl) {
		pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
		uhci->fl = NULL;
	}

	if (uhci->bus) {
		usb_free_bus(uhci->bus);
		uhci->bus = NULL;
	}

#ifdef CONFIG_PROC_FS
	if (uhci->proc_entry) {
		/* the entry was registered under "hc<num>" */
		sprintf(buf, "hc%d", uhci->num);

		remove_proc_entry(buf, uhci_proc_root);
		uhci->proc_entry = NULL;
	}
#endif

	free_uhci(uhci);
}
2644 
2645 /*
2646  * Allocate a frame list, and then setup the skeleton
2647  *
2648  * The hardware doesn't really know any difference
2649  * in the queues, but the order does matter for the
2650  * protocols higher up. The order is:
2651  *
2652  *  - any isochronous events handled before any
2653  *    of the queues. We don't do that here, because
2654  *    we'll create the actual TD entries on demand.
2655  *  - The first queue is the interrupt queue.
2656  *  - The second queue is the control queue, split into low and high speed
2657  *  - The third queue is bulk queue.
2658  *  - The fourth queue is the bandwidth reclamation queue, which loops back
2659  *    to the high speed control queue.
2660  */
alloc_uhci(struct pci_dev * dev,unsigned int io_addr,unsigned int io_size)2661 static int alloc_uhci(struct pci_dev *dev, unsigned int io_addr, unsigned int io_size)
2662 {
2663 	struct uhci *uhci;
2664 	int retval;
2665 	char buf[8], *bufp = buf;
2666 	int i, port;
2667 	struct usb_bus *bus;
2668 	dma_addr_t dma_handle;
2669 #ifdef CONFIG_PROC_FS
2670 	struct proc_dir_entry *ent;
2671 #endif
2672 
2673 	retval = -ENODEV;
2674 	if (pci_enable_device(dev) < 0) {
2675 		err("couldn't enable PCI device");
2676 		goto err_enable_device;
2677 	}
2678 
2679 	if (!dev->irq) {
2680 		err("found UHCI device with no IRQ assigned. check BIOS settings!");
2681 		goto err_invalid_irq;
2682 	}
2683 
2684 	if (!pci_dma_supported(dev, 0xFFFFFFFF)) {
2685 		err("PCI subsystem doesn't support 32 bit addressing?");
2686 		goto err_pci_dma_supported;
2687 	}
2688 
2689 	retval = -EBUSY;
2690 	if (!request_region(io_addr, io_size, "usb-uhci")) {
2691 		err("couldn't allocate I/O range %x - %x", io_addr,
2692 			io_addr + io_size - 1);
2693 		goto err_request_region;
2694 	}
2695 
2696 	pci_set_master(dev);
2697 
2698 #ifndef __sparc__
2699 	sprintf(buf, "%d", dev->irq);
2700 #else
2701 	bufp = __irq_itoa(dev->irq);
2702 #endif
2703 	printk(KERN_INFO __FILE__ ": USB UHCI at I/O 0x%x, IRQ %s\n",
2704 		io_addr, bufp);
2705 
2706 	if (pci_set_dma_mask(dev, 0xFFFFFFFF)) {
2707 		err("couldn't set PCI dma mask");
2708 		retval = -ENODEV;
2709 		goto err_pci_set_dma_mask;
2710 	}
2711 
2712 	uhci = kmalloc(sizeof(*uhci), GFP_KERNEL);
2713 	if (!uhci) {
2714 		err("couldn't allocate uhci structure");
2715 		retval = -ENOMEM;
2716 		goto err_alloc_uhci;
2717 	}
2718 
2719 	uhci->dev = dev;
2720 	uhci->irq = dev->irq;
2721 	uhci->io_addr = io_addr;
2722 	uhci->io_size = io_size;
2723 	pci_set_drvdata(dev, uhci);
2724 
2725 #ifdef CONFIG_PROC_FS
2726 	uhci->num = uhci_num++;
2727 
2728 	sprintf(buf, "hc%d", uhci->num);
2729 
2730 	ent = create_proc_entry(buf, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2731 	if (!ent) {
2732 		err("couldn't create uhci proc entry");
2733 		retval = -ENOMEM;
2734 		goto err_create_proc_entry;
2735 	}
2736 
2737 	ent->data = uhci;
2738 	ent->proc_fops = &uhci_proc_operations;
2739 	ent->size = 0;
2740 	uhci->proc_entry = ent;
2741 #endif
2742 
2743 	/* Reset here so we don't get any interrupts from an old setup */
2744 	/*  or broken setup */
2745 	reset_hc(uhci);
2746 
2747 	uhci->fsbr = 0;
2748 	uhci->fsbrtimeout = 0;
2749 
2750 	uhci->is_suspended = 0;
2751 
2752 	spin_lock_init(&uhci->qh_remove_list_lock);
2753 	INIT_LIST_HEAD(&uhci->qh_remove_list);
2754 
2755 	spin_lock_init(&uhci->urb_remove_list_lock);
2756 	INIT_LIST_HEAD(&uhci->urb_remove_list);
2757 
2758 	spin_lock_init(&uhci->urb_list_lock);
2759 	INIT_LIST_HEAD(&uhci->urb_list);
2760 
2761 	spin_lock_init(&uhci->complete_list_lock);
2762 	INIT_LIST_HEAD(&uhci->complete_list);
2763 
2764 	spin_lock_init(&uhci->frame_list_lock);
2765 
2766 	/* We need exactly one page (per UHCI specs), how convenient */
2767 	/* We assume that one page is atleast 4k (1024 frames * 4 bytes) */
2768 #if PAGE_SIZE < (4 * 1024)
2769 #error PAGE_SIZE is not atleast 4k
2770 #endif
2771 	uhci->fl = pci_alloc_consistent(uhci->dev, sizeof(*uhci->fl), &dma_handle);
2772 	if (!uhci->fl) {
2773 		err("unable to allocate consistent memory for frame list");
2774 		goto err_alloc_fl;
2775 	}
2776 
2777 	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2778 
2779 	uhci->fl->dma_handle = dma_handle;
2780 
2781 	uhci->td_pool = pci_pool_create("uhci_td", uhci->dev,
2782 		sizeof(struct uhci_td), 16, 0, GFP_DMA | GFP_ATOMIC);
2783 	if (!uhci->td_pool) {
2784 		err("unable to create td pci_pool");
2785 		goto err_create_td_pool;
2786 	}
2787 
2788 	uhci->qh_pool = pci_pool_create("uhci_qh", uhci->dev,
2789 		sizeof(struct uhci_qh), 16, 0, GFP_DMA | GFP_ATOMIC);
2790 	if (!uhci->qh_pool) {
2791 		err("unable to create qh pci_pool");
2792 		goto err_create_qh_pool;
2793 	}
2794 
2795 	bus = usb_alloc_bus(&uhci_device_operations);
2796 	if (!bus) {
2797 		err("unable to allocate bus");
2798 		goto err_alloc_bus;
2799 	}
2800 
2801 	uhci->bus = bus;
2802 	bus->bus_name = dev->slot_name;
2803 	bus->hcpriv = uhci;
2804 
2805 	usb_register_bus(uhci->bus);
2806 
2807 	/* Initialize the root hub */
2808 
2809 	/* UHCI specs says devices must have 2 ports, but goes on to say */
2810 	/*  they may have more but give no way to determine how many they */
2811 	/*  have. However, according to the UHCI spec, Bit 7 is always set */
2812 	/*  to 1. So we try to use this to our advantage */
2813 	for (port = 0; port < (uhci->io_size - 0x10) / 2; port++) {
2814 		unsigned int portstatus;
2815 
2816 		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2817 		if (!(portstatus & 0x0080))
2818 			break;
2819 	}
2820 	if (debug)
2821 		info("detected %d ports", port);
2822 
2823 	/* This is experimental so anything less than 2 or greater than 8 is */
2824 	/*  something weird and we'll ignore it */
2825 	if (port < 2 || port > 8) {
2826 		info("port count misdetected? forcing to 2 ports");
2827 		port = 2;
2828 	}
2829 
2830 	uhci->rh.numports = port;
2831 
2832 	uhci->bus->root_hub = uhci->rh.dev = usb_alloc_dev(NULL, uhci->bus);
2833 	if (!uhci->rh.dev) {
2834 		err("unable to allocate root hub");
2835 		goto err_alloc_root_hub;
2836 	}
2837 
2838 	uhci->skeltd[0] = uhci_alloc_td(uhci, uhci->rh.dev);
2839 	if (!uhci->skeltd[0]) {
2840 		err("unable to allocate TD 0");
2841 		goto err_alloc_skeltd;
2842 	}
2843 
2844 	/*
2845 	 * 9 Interrupt queues; link int2 to int1, int4 to int2, etc
2846 	 * then link int1 to control and control to bulk
2847 	 */
2848 	for (i = 1; i < 9; i++) {
2849 		struct uhci_td *td;
2850 
2851 		td = uhci->skeltd[i] = uhci_alloc_td(uhci, uhci->rh.dev);
2852 		if (!td) {
2853 			err("unable to allocate TD %d", i);
2854 			goto err_alloc_skeltd;
2855 		}
2856 
2857 		uhci_fill_td(td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2858 		td->link = uhci->skeltd[i - 1]->dma_handle;
2859 	}
2860 
2861 	uhci->skel_term_td = uhci_alloc_td(uhci, uhci->rh.dev);
2862 	if (!uhci->skel_term_td) {
2863 		err("unable to allocate skel TD term");
2864 		goto err_alloc_skeltd;
2865 	}
2866 
2867 	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2868 		uhci->skelqh[i] = uhci_alloc_qh(uhci, uhci->rh.dev);
2869 		if (!uhci->skelqh[i]) {
2870 			err("unable to allocate QH %d", i);
2871 			goto err_alloc_skelqh;
2872 		}
2873 	}
2874 
2875 	uhci_fill_td(uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2876 	uhci->skel_int1_td->link = uhci->skel_ls_control_qh->dma_handle | UHCI_PTR_QH;
2877 
2878 	uhci->skel_ls_control_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
2879 	uhci->skel_ls_control_qh->element = UHCI_PTR_TERM;
2880 
2881 	uhci->skel_hs_control_qh->link = uhci->skel_bulk_qh->dma_handle | UHCI_PTR_QH;
2882 	uhci->skel_hs_control_qh->element = UHCI_PTR_TERM;
2883 
2884 	uhci->skel_bulk_qh->link = uhci->skel_term_qh->dma_handle | UHCI_PTR_QH;
2885 	uhci->skel_bulk_qh->element = UHCI_PTR_TERM;
2886 
2887 	/* This dummy TD is to work around a bug in Intel PIIX controllers */
2888 	uhci_fill_td(uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
2889 	uhci->skel_term_td->link = uhci->skel_term_td->dma_handle;
2890 
2891 	uhci->skel_term_qh->link = UHCI_PTR_TERM;
2892 	uhci->skel_term_qh->element = uhci->skel_term_td->dma_handle;
2893 
2894 	/*
2895 	 * Fill the frame list: make all entries point to
2896 	 * the proper interrupt queue.
2897 	 *
2898 	 * This is probably silly, but it's a simple way to
2899 	 * scatter the interrupt queues in a way that gives
2900 	 * us a reasonable dynamic range for irq latencies.
2901 	 */
2902 	for (i = 0; i < UHCI_NUMFRAMES; i++) {
2903 		int irq = 0;
2904 
2905 		if (i & 1) {
2906 			irq++;
2907 			if (i & 2) {
2908 				irq++;
2909 				if (i & 4) {
2910 					irq++;
2911 					if (i & 8) {
2912 						irq++;
2913 						if (i & 16) {
2914 							irq++;
2915 							if (i & 32) {
2916 								irq++;
2917 								if (i & 64)
2918 									irq++;
2919 							}
2920 						}
2921 					}
2922 				}
2923 			}
2924 		}
2925 
2926 		/* Only place we don't use the frame list routines */
2927 		uhci->fl->frame[i] =  uhci->skeltd[irq]->dma_handle;
2928 	}
2929 
2930 	start_hc(uhci);
2931 
2932 	if (request_irq(dev->irq, uhci_interrupt, SA_SHIRQ, "usb-uhci", uhci))
2933 		goto err_request_irq;
2934 
2935 	/* disable legacy emulation */
2936 	pci_write_config_word(uhci->dev, USBLEGSUP, USBLEGSUP_DEFAULT);
2937 
2938 	usb_connect(uhci->rh.dev);
2939 
2940 	if (usb_new_device(uhci->rh.dev) != 0) {
2941 		err("unable to start root hub");
2942 		retval = -ENOMEM;
2943 		goto err_start_root_hub;
2944 	}
2945 
2946 	return 0;
2947 
2948 /*
2949  * error exits:
2950  */
2951 err_start_root_hub:
2952 	free_irq(uhci->irq, uhci);
2953 	uhci->irq = -1;
2954 
2955 err_request_irq:
2956 	for (i = 0; i < UHCI_NUM_SKELQH; i++)
2957 		if (uhci->skelqh[i]) {
2958 			uhci_free_qh(uhci, uhci->skelqh[i]);
2959 			uhci->skelqh[i] = NULL;
2960 		}
2961 
2962 err_alloc_skelqh:
2963 	for (i = 0; i < UHCI_NUM_SKELTD; i++)
2964 		if (uhci->skeltd[i]) {
2965 			uhci_free_td(uhci, uhci->skeltd[i]);
2966 			uhci->skeltd[i] = NULL;
2967 		}
2968 
2969 err_alloc_skeltd:
2970 	usb_free_dev(uhci->rh.dev);
2971 	uhci->rh.dev = NULL;
2972 
2973 err_alloc_root_hub:
2974 	usb_free_bus(uhci->bus);
2975 	uhci->bus = NULL;
2976 
2977 err_alloc_bus:
2978 	pci_pool_destroy(uhci->qh_pool);
2979 	uhci->qh_pool = NULL;
2980 
2981 err_create_qh_pool:
2982 	pci_pool_destroy(uhci->td_pool);
2983 	uhci->td_pool = NULL;
2984 
2985 err_create_td_pool:
2986 	pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
2987 	uhci->fl = NULL;
2988 
2989 err_alloc_fl:
2990 #ifdef CONFIG_PROC_FS
2991 	remove_proc_entry(buf, uhci_proc_root);
2992 	uhci->proc_entry = NULL;
2993 
2994 err_create_proc_entry:
2995 	free_uhci(uhci);
2996 #endif
2997 
2998 err_alloc_uhci:
2999 
3000 err_pci_set_dma_mask:
3001 	release_region(io_addr, io_size);
3002 
3003 err_request_region:
3004 
3005 err_pci_dma_supported:
3006 
3007 err_invalid_irq:
3008 
3009 err_enable_device:
3010 
3011 	return retval;
3012 }
3013 
uhci_pci_probe(struct pci_dev * dev,const struct pci_device_id * id)3014 static int __devinit uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
3015 {
3016 	int i;
3017 
3018 	/* Search for the IO base address.. */
3019 	for (i = 0; i < 6; i++) {
3020 		unsigned int io_addr = pci_resource_start(dev, i);
3021 		unsigned int io_size = pci_resource_len(dev, i);
3022 
3023 		/* IO address? */
3024 		if (!(pci_resource_flags(dev, i) & IORESOURCE_IO))
3025 			continue;
3026 
3027 		return alloc_uhci(dev, io_addr, io_size);
3028 	}
3029 
3030 	return -ENODEV;
3031 }
3032 
/*
 * PCI remove callback: disconnect the root hub, deregister the bus,
 * flush pending removals, reset the controller and release all of its
 * resources.
 */
static void __devexit uhci_pci_remove(struct pci_dev *dev)
{
	struct uhci *uhci = pci_get_drvdata(dev);

	if (uhci->bus->root_hub)
		usb_disconnect(&uhci->bus->root_hub);

	usb_deregister_bus(uhci->bus);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */
	uhci_free_pending_qhs(uhci);
	uhci_remove_pending_qhs(uhci);

	reset_hc(uhci);
	release_region(uhci->io_addr, uhci->io_size);

	/* removing URBs above may have queued further QHs for removal */
	uhci_free_pending_qhs(uhci);

	release_uhci(uhci);
}
3056 
#ifdef CONFIG_PM
/* PM suspend hook: put the controller into global suspend */
static int uhci_pci_suspend(struct pci_dev *dev, u32 state)
{
	struct uhci *uhci = pci_get_drvdata(dev);

	suspend_hc(uhci);
	return 0;
}

/* PM resume hook: a full reset followed by a restart recovers the HC */
static int uhci_pci_resume(struct pci_dev *dev)
{
	struct uhci *uhci = pci_get_drvdata(dev);

	reset_hc(uhci);
	start_hc(uhci);
	return 0;
}
#endif
3071 
/*
 * PCI match table: bind by class code (serial bus / USB, UHCI prog-if
 * 0x00) rather than by vendor/device ID.
 */
static const struct pci_device_id __devinitdata uhci_pci_ids[] = { {

	/* handle any USB UHCI controller */
	class: 		((PCI_CLASS_SERIAL_USB << 8) | 0x00),
	class_mask: 	~0,

	/* no matter who makes it */
	vendor:		PCI_ANY_ID,
	device:		PCI_ANY_ID,
	subvendor:	PCI_ANY_ID,
	subdevice:	PCI_ANY_ID,

	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
3088 
/* PCI glue: probe/remove plus the optional power-management hooks */
static struct pci_driver uhci_pci_driver = {
	name:		"usb-uhci",
	id_table:	uhci_pci_ids,

	probe:		uhci_pci_probe,
	remove:		__devexit_p(uhci_pci_remove),

#ifdef	CONFIG_PM
	suspend:	uhci_pci_suspend,
	resume:		uhci_pci_resume,
#endif	/* PM */
};
3101 
3102 
/*
 * Module init: allocate the debug buffer (if debugging), the procfs
 * root and the urb_priv slab cache, then register the PCI driver.
 * Failures unwind through the goto ladder in reverse order.
 */
static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	info(DRIVER_DESC " " DRIVER_VERSION);

	if (debug) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
	}

#ifdef CONFIG_PROC_FS
	uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
	if (!uhci_proc_root)
		goto proc_failed;
#endif

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_module_init(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	/* non-zero return means objects were still live in the cache */
	if (kmem_cache_destroy(uhci_up_cachep))
		printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

up_failed:

#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", 0);

proc_failed:
#endif
	if (errbuf)
		kfree(errbuf);

errbuf_failed:

	return retval;
}
3150 
/*
 * Module exit: unregister the PCI driver (removing each controller),
 * then release the slab cache, the procfs root and the debug buffer.
 */
static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	/* non-zero return means urb_priv objects were leaked */
	if (kmem_cache_destroy(uhci_up_cachep))
		printk(KERN_INFO "uhci: not all urb_priv's were freed\n");

#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/uhci", 0);
#endif

	if (errbuf)
		kfree(errbuf);
}
3165 
/* Module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
3172 
3173