/*
 * Copyright (c) 2001-2003 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */
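
/*
 * Scheduling works against two parallel views: ehci->periodic[] is the
 * frame list the controller itself reads (little-endian links tagged
 * with Q_TYPE_*), and ehci->pshadow[] mirrors it with plain pointers so
 * the HCD can walk the same lists without decoding hardware links.
 */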

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
#ifdef have_split_iso
	case Q_TYPE_SITD:
		return &periodic->sitd->sitd_next;
#endif /* have_split_iso */
	}
	dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
	// BUG ();
	return 0;
}

/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
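	/* (hw_next sits first in qh, itd, sitd, and fstn alike, so the
	 * qh overlay used below is safe whatever type "here" holds)
	 */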
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;
	next_p->ptr = 0;

	return 1;
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;
#ifdef have_split_iso
	u32			temp;	/* used by the Q_TYPE_SITD case below */
#endif

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			q = &q->qh->qh_next;
			break;
		case Q_TYPE_FSTN:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				dbg ("not counting FSTN bandwidth yet ...");
			}
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			q = &q->itd->itd_next;
			break;
#ifdef have_split_iso
		case Q_TYPE_SITD:
			temp = q->sitd->hw_fullspeed_ep &
				__constant_cpu_to_le32 (1 << 31);

			// FIXME:  this doesn't count data bytes right...

			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (temp)
					usecs += HS_USECS (188);
				else
					usecs += HS_USECS (1);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				if (temp)
					usecs += HS_USECS (0);
				else
					usecs += HS_USECS (188);
			}
			q = &q->sitd->sitd_next;
			break;
#endif /* have_split_iso */
		default:
			BUG ();
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		err ("overallocated uframe %d, periodic is %d usecs",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries, so this waits just
	 * over one frame (9 uframes at 125 usec each)...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/

// FIXME microframe periods not yet handled

static void intr_deschedule (
	struct ehci_hcd	*ehci,
	struct ehci_qh	*qh,
	int		wait
) {
	int		status;
	unsigned	frame = qh->start;

	do {
		periodic_unlink (ehci, frame, qh);
		qh_put (ehci, qh);
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = 0;
	ehci->periodic_sched--;

	/* maybe turn off periodic schedule */
	if (!ehci->periodic_sched)
		status = disable_periodic (ehci);
	else {
		status = 0;
		vdbg ("periodic schedule still enabled");
	}

	/*
	 * If the hc may be looking at this qh, then delay a uframe
	 * (yeech!) to be sure it's done.
	 * No other threads may be mucking with this qh.
	 */
	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
		if (wait) {
			udelay (125);
			qh->hw_next = EHCI_LIST_END;
		} else {
			/* we may not be IDLE yet, but if the qh is empty
			 * the race is very short.  then if qh also isn't
			 * rescheduled soon, it won't matter.  otherwise...
			 */
			vdbg ("intr_deschedule...");
		}
	} else
		qh->hw_next = EHCI_LIST_END;

	qh->qh_state = QH_STATE_IDLE;

	/* update per-qh bandwidth utilization (for usbfs) */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
		(qh->usecs + qh->c_usecs) / qh->period;

	dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
		qh, qh->period, frame,
		atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;
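	/* example: a transfer needing 30 usecs fits only if each uframe
	 * it touches has at most 100 - 30 = 70 usecs already claimed
	 */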

	do {
		int	claimed;

// FIXME delete when intr_submit handles non-empty queues
// this gives us a one intr/frame limit (vs N/uframe)
// ... and also lets us avoid tracking split transactions
// that might collide at a given TT/hub.
		if (ehci->pshadow [frame].ptr)
			return 0;

		claimed = periodic_usecs (ehci, frame, uframe);
		if (claimed > usecs)
			return 0;

// FIXME update to handle sub-frame periods
	} while ((frame += period) < ehci->periodic_size);

	// success!
	return 1;
}

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	u32			*c_maskp
)
{
	int	retval = -ENOSPC;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = cpu_to_le32 (0);
		goto done;
	}

	/* This is a split transaction; check the bandwidth available for
	 * the completion too.  Check both worst and best case gaps: worst
	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
	 * vice versa.  Difference can be almost two uframe times, but we
	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
	 * even better cases exist, like immediate device NAK.)
	 *
	 * FIXME don't even bother unless we know this TT is idle in that
	 * range of uframes ... for now, check_period() allows only one
	 * interrupt transfer per frame, so needn't check "TT busy" status
	 * when scheduling a split (QH, SITD, or FSTN).
	 *
	 * FIXME ehci 0.96 and above can use FSTNs
	 */
	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
			qh->period, qh->c_usecs))
		goto done;
	if (!check_period (ehci, frame, uframe + qh->gap_uf,
			qh->period, qh->c_usecs))
		goto done;

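	/* both CSPLIT slots checked above fit; record them in the C-mask */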
	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
	retval = 0;
done:
	return retval;
}

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
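		/* ffs() is 1-based, so the "--uframe" below converts
		 * bit N of the S-mask back to uframe number N
		 */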
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~__constant_cpu_to_le32(0xffff);
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {
// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic

			BUG ();
		} else {
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] =
				QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
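	/* (appending an empty td list just creates or finds the qh, so
	 * any scheduling error surfaces before this urb's tds are queued)
	 */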
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

static inline struct ehci_iso_stream *
iso_stream_alloc (int mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kmalloc(sizeof *stream, mem_flags);
	if (likely (stream != 0)) {
		memset (stream, 0, sizeof(*stream));
		INIT_LIST_HEAD(&stream->itd_list);
		INIT_LIST_HEAD(&stream->free_itd_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static inline void
iso_stream_init (
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	u32			buf1;
	unsigned		epnum, maxp, multi;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	if (is_input) {
		maxp = dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);
	} else {
		maxp = dev->epmaxpacketout [epnum];
		buf1 = 0;
	}

	multi = hb_mult(maxp);
	maxp = max_packet(maxp);
	buf1 |= maxp;
	maxp *= multi;

	stream->dev = (struct hcd_dev *)dev->hcpriv;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;

	stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
	stream->buf1 = cpu_to_le32 (buf1);
	stream->buf2 = cpu_to_le32 (multi);

	/* usbfs wants to report the average usecs per frame tied up
	 * when transfers on this endpoint are scheduled ...
	 */
	stream->usecs = HS_USECS_ISO (maxp);
	bandwidth = stream->usecs * 8;
	bandwidth /= 1 << (interval - 1);
	stream->bandwidth = bandwidth;
}

static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int	is_in;

		// BUG_ON (!list_empty(&stream->itd_list));

		while (!list_empty (&stream->free_itd_list)) {
			struct ehci_itd	*itd;

			itd = list_entry (stream->free_itd_list.next,
				struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		stream->dev->ep [is_in + stream->bEndpointAddress] = 0;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != 0))
		stream->refcount++;
	return stream;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct hcd_dev		*dev;
	struct ehci_iso_stream	*stream;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		epnum += 0x10;

	spin_lock_irqsave (&ehci->lock, flags);

	dev = (struct hcd_dev *)urb->dev->hcpriv;
	stream = dev->ep [epnum];

	if (unlikely (stream == 0)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != 0)) {
			/* dev->ep owns the initial refcount */
			dev->ep[epnum] = stream;
			iso_stream_init(stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum & 0x0f,
			(epnum & 0x10) ? "in" : "out");
		stream = 0;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

static inline struct ehci_itd_sched *
itd_sched_alloc (unsigned packets, int mem_flags)
{
	struct ehci_itd_sched	*itd_sched;
	int			size = sizeof *itd_sched;

	size += packets * sizeof (struct ehci_iso_uframe);
	itd_sched = kmalloc (size, mem_flags);
	if (likely (itd_sched != 0)) {
		memset(itd_sched, 0, size);
		INIT_LIST_HEAD (&itd_sched->itd_list);
	}
	return itd_sched;
}

static int
itd_sched_init (
	struct ehci_itd_sched	*itd_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	itd_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_uframe	*uframe = &itd_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

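		/* itd transaction word, per the EHCI spec (section 3.3):
		 * status in bits 31:28, length in 27:16, IOC in bit 15,
		 * page select in 14:12 (filled in later by itd_patch),
		 * and the buffer offset in bits 11:0
		 */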
		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
	return 0;
}

static void
itd_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_itd_sched	*itd_sched
)
{
	list_splice (&itd_sched->itd_list, &stream->free_itd_list);
	kfree (itd_sched);
}

static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
)
{
	struct ehci_itd		*itd;
	int			status;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_itd_sched	*itd_sched;

	itd_sched = itd_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (itd_sched == 0))
		return -ENOMEM;

	status = itd_sched_init (itd_sched, stream, urb);
	if (unlikely (status != 0)) {
		itd_sched_free (stream, itd_sched);
		return status;
	}

	if (urb->interval < 8)
		num_itds = 1 + (itd_sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;
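	/* (an itd spans at most one frame, i.e. 8 uframes: sub-frame
	 * intervals pack several packets per itd, longer intervals
	 * take one itd per packet)
	 */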

	/* allocate/init ITDs */
	for (i = 0; i < num_itds; i++) {

		/* free_itd_list.next might be cache-hot ... but maybe
		 * the HC caches it too.  avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_itd_list))) {
			itd = list_entry (stream->free_itd_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = pci_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);

		if (unlikely (0 == itd)) {
			itd_sched_free (stream, itd_sched);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &itd_sched->itd_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = itd_sched;
	urb->error_count = 0;
	return 0;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */
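/* (frames are 1 msec each, so that's 10 msec of scheduling headroom) */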

static int
itd_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, end, max;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_itd_sched	*itd_sched = urb->hcpriv;

	if (unlikely (itd_sched->span > (mod - 8 * SCHEDULE_SLOP))) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;
	max -= itd_sched->span;
	max -= 8 * SCHEDULE_SLOP;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->itd_list))) {

		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely (start < max))
			goto ready;

		/* two cases:
		 * (a) we missed some uframes ... can reschedule
		 * (b) trying to overcommit the schedule
		 * FIXME (b) should be a hard failure
		 */
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	end = start;

	ehci_vdbg (ehci, "%s schedule from %d (%d..%d), was %d\n",
			__FUNCTION__, now, start, max,
			stream->next_uframe);

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	if (likely (max > (start + urb->interval)))
		max = start + urb->interval;

	/* hack: account for itds already scheduled to this endpoint */
	if (unlikely (list_empty (&stream->itd_list)))
		end = max;

	/* within [start..max] find a uframe slot with enough bandwidth */
	end %= mod;
	do {
		unsigned	uframe;
		int		enough_space = 1;

		/* check schedule: enough space? */
		uframe = start;
		do {
			uframe %= mod;

			/* can't commit more than 80% periodic == 100 usec */
			if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
					> (100 - stream->usecs)) {
				enough_space = 0;
				break;
			}

			/* we know urb->interval is 2^N uframes */
			uframe += urb->interval;
		} while (uframe != end);

		/* (re)schedule it here if there's enough bandwidth */
		if (enough_space) {
			start %= mod;
			if (unlikely (!list_empty (&stream->itd_list))) {
				/* host fell behind ... maybe irq latencies
				 * delayed this request queue for too long.
				 */
				stream->rescheduled++;
				pr_debug ("ehci %s devpath %s "
					"iso%d%s %d.%d skip %d.%d\n",
					ehci->hcd.pdev->slot_name,
					urb->dev->devpath,
					stream->bEndpointAddress & 0x0f,
					(stream->bEndpointAddress & USB_DIR_IN)
						? "in" : "out",
					stream->next_uframe >> 3,
					stream->next_uframe & 0x7,
					start >> 3, start & 0x7);
			}
			stream->next_uframe = start;
			goto ready;
		}

	} while (++start < max);

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d end %d max %d)\n",
		list_empty (&stream->itd_list) ? "" : "re",
		urb, now, end, max);
	status = -ENOSPC;

fail:
	itd_sched_free (stream, itd_sched);
	urb->hcpriv = 0;
	return status;

ready:
	urb->start_frame = stream->next_uframe;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch (
	struct ehci_itd		*itd,
	struct ehci_itd_sched	*itd_sched,
	unsigned		index,
	u16			uframe,
	int			first
)
{
	struct ehci_iso_uframe	*uf = &itd_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (!first && uf->cross)) {
		u64	bufp = uf->bufp + 4096;
		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}

static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet, first = 1;
	unsigned		next_uframe, uframe, frame;
	struct ehci_itd_sched	*itd_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe % mod;

	if (unlikely (list_empty(&stream->itd_list))) {
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = 0; packet < urb->number_of_packets; ) {
		if (itd == 0) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&itd_sched->itd_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (itd_sched->itd_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->itd_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			first = 1;
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, itd_sched, packet, uframe, first);
		first = 0;

		next_uframe += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule: an itd covers at
		 * most one frame, so flush it when the next packet falls in
		 * a new frame, or when this urb has no more packets
		 */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = 0;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	itd_sched_free (stream, itd_sched);
	urb->hcpriv = 0;

	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}

#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = 0;
	itd->stream = 0;
	list_move (&itd->itd_list, &stream->free_itd_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->itd_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	//dev = usb_get_dev (urb->dev);
	dev = urb->dev;
	ehci_urb_done (ehci, urb, regs);
	urb = 0;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;

	if (unlikely (list_empty (&stream->itd_list))) {
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	//usb_put_dev (dev);

	return 1;
}

/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == 0)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = itd_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}

#ifdef have_split_iso

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through
 * the TTs in USB 2.0 hubs.
 *
 * FIXME not yet implemented
 */

#endif /* have_split_iso */

/*-------------------------------------------------------------------------*/

static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	modified;

	mod = ehci->periodic_size << 3;
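	/* (mod is the schedule length in uframes: 8 uframes per frame) */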

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		clock = readl (&ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != 0) {
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh, regs);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh, 0);
				qh_put (ehci, temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				for (uf = uframes; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
							& ISO_ACTIVE))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb();
				modified = itd_complete (ehci, q.itd, regs);
				q = *q_p;
				break;
#ifdef have_split_iso
			case Q_TYPE_SITD:
				// nyet!
#endif /* have_split_iso */
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = 0;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (now_uframe == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}