/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
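/*
 * Schematically (an illustrative sketch): with FSBR on, the end of the
 * schedule loops back through the full-speed control/bulk QHs instead
 * of terminating:
 *
 *   ... -> last async QH -> skel_term_qh -> first FSBR QH -> ...
 *
 * so the controller keeps polling those queues for the rest of each frame.
 */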
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
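/*
 * Illustrative sketch of a frame's hardware schedule once iso TDs are
 * present:
 *
 *   frame[N] -> iso TD -> iso TD -> ... -> interrupt skeleton QHs
 *
 * Additional TDs for the same frame are appended at the end of the
 * existing TD chain.
 */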
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next,
					 struct uhci_td,
					 fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = usb_endpoint_type(&hep->desc);
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM, then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
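/*
 * Worked example (a sketch): if URB A ends short with its last TD having
 * used DATA0, the next transaction on the endpoint must use DATA1.  When
 * the first TD of the following URB was built expecting DATA0 instead,
 * every TD in that URB (and in the URBs after it) gets its toggle bit
 * flipped below.
 */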
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
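/*
 * For example (illustrative): with phase 3 and period 8 this scans
 * load[3], load[11], load[19], ... and returns the largest entry, i.e.
 * the load of the busiest frame this QH would ever share.
 */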
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
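/*
 * Example (a sketch): a period-4 QH can start in frame phase 0..3; the
 * loop below picks the phase whose worst-case frame load (as reported
 * by uhci_highest_load) is smallest, a simple minimax assignment.
 */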
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}

/*
 * Control transfers
 */
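/*
 * A control URB is built as a chain of TDs ending in the QH's dummy TD
 * (sketch):
 *
 *   SETUP TD -> DATA TD(s) -> STATUS TD -> new dummy TD
 *
 * The old dummy becomes the SETUP TD and is activated last, so the
 * controller never sees a partially built queue.
 */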
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
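/*
 * As with control transfers, the data TDs are built onto the QH's
 * current dummy TD and a fresh dummy is appended; the old dummy is
 * activated only once the whole chain is in place (a sketch of the
 * scheme used below).
 */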
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	int this_sg_len;
	dma_addr_t data;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;
	struct scatterlist  *sg;
	int i;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	i = urb->num_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		data = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		data = urb->transfer_dma;
		this_sg_len = len;
	}
	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	for (;;) {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		toggle ^= 1;
		data += pktsze;
		this_sg_len -= pktsze;
		len -= maxsze;
		if (this_sg_len <= 0) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			data = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}
	}
	/*
	 * URB_ZERO_PACKET means adding a 0-length packet if the direction
	 * is OUT and transfer_buffer_length was an exact multiple of maxsze,
	 * hence (len = transfer_buffer_length - N * maxsze) == 0.  However,
	 * if transfer_buffer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
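	/* Example (illustrative): urb->interval == 10 yields exponent 3
	 * below, so the URB joins the period-8 skeleton and, on success,
	 * urb->interval is rounded down to 8. */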

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;

		/* If the slot is full, try a lower period */
		do {
			qh->period = 1 << exponent;
			qh->skel = SKEL_INDEX(exponent);

			/* For now, interrupt phase is fixed by the layout
			 * of the QH lists.
			 */
			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
			ret = uhci_check_bandwidth(uhci, qh);
		} while (ret != 0 && --exponent >= 0);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
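			/* E.g. (a sketch): frame 1003, phase 2, period 8
			 * gives start_frame 1010, the next frame congruent
			 * to the chosen phase. */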
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		/* Find the next unused frame */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP) {
			/* Skip some frames if necessary to ensure
			 * the start frame is in the future.
			 */
			uhci_get_current_frame_number(uhci);
			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
				frame = uhci->frame_number + 1;
				frame += ((qh->phase - frame) &
					(qh->period - 1));
			}
		}	/* Otherwise pick up where the last URB leaves off */
		urb->start_frame = frame;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* Subtract off the length of the SETUP packet from
		 * urb->actual_length.
		 */
		urb->actual_length -= min_t(u32, 8, urb->actual_length);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
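/* The QH is known to be off the hardware schedule once the frame counter
 * has moved past the frame in which it was unlinked; while the controller
 * is stopped, the is_stopped term makes the test succeed immediately. */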

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
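/* Concretely (a sketch): qh->element is found still pointing at the
 * completed qh->post_td, so the workaround advances it by hand to
 * post_td->link. */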
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = uhci->is_stopped;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}