// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
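
/*
 * For example (a sketch; actual assignments depend on how many endpoints
 * the silicon provides): ep0 carries control, both sides of hw_ep1 carry
 * the bulk "async" queues (see the 24-August-2006 note above), and the
 * remaining endpoints get claimed one-per-qh by periodic transfers.
 */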

struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
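		/*
		 * The MUSB programmer's guide says FLUSHFIFO should only
		 * be used while TXPKTRDY is set, which is presumably why
		 * both bits are written together; with double buffering
		 * each flush discards one packet, hence the loop.
		 */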
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the tx fifo flush fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure tx urb(s) are queued when
		 * unplugging the usb device which is connected to the
		 * AM335x usb host port.
		 *
		 * Using a usb-ethernet device and running iperf (client on
		 * AM335x) has a very high chance of triggering it.
		 *
		 * Better to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
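	/*
	 * NOTE: ep0 shares one register set for both directions, at the
	 * TX offsets, so the MUSB_TXCSR accesses below really hit CSR0
	 * (see also the comment in musb_rx_reinit()).
	 */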
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
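		/* ep0: CSR0 layout differs; SETUPPKT makes this a SETUP token */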
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}
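
/*
 * A shared-FIFO endpoint serves only one direction at a time, so
 * musb_ep_set_qh() keeps in_qh and out_qh in sync there; either pointer
 * then names the active qh regardless of direction.
 */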

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue.  The endpoint
 * must already be claimed by the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
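	/*
	 * Drop the controller lock around the giveback: the completion
	 * handler runs here and may resubmit or unlink URBs.
	 */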
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;
	u16			toggle;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			fallthrough;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
			hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
					struct musb_qh *qh,
					u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table below:
		 *	bulk_split	hb_mult	Autoset_Enable
		 *	0		1	Yes (Normal)
		 *	0		>1	No (High BW ISO)
		 *	1		1	Yes (HS bulk)
		 *	1		>1	Yes (FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
					   struct urb *urb,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(hw_ep, qh,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(hw_ep, urb, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting the
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
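
/*
 * NOTE: on failure musb_tx_dma_program() has already released the
 * channel and cleared the DMA bits in TXCSR, so callers can simply
 * fall back to loading the FIFO with PIO.
 */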

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr = 0;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);
			csr |= musb->io.set_toggle(qh, is_out, urb);

			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;
	u16			toggle;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first
		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
		 */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, rx_csr);
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		toggle = musb->io.get_toggle(cur_qh, !is_in);
		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}

		if (next_qh)
			musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			musb_dbg(musb, "start no-DATA");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			musb_dbg(musb, "start IN-DATA");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			musb_dbg(musb, "start OUT-DATA");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		fallthrough;
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		musb_dbg(musb, "STALLING ENDPOINT");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		musb_dbg(musb, "no response, csr0 %04x", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		musb_dbg(musb, "control NAK timeout");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		musb_dbg(musb, "aborting");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/
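
/*
 * A sketch of the resulting TXCSR programming (see
 * musb_tx_dma_set_mode_mentor() above): mode 1, used for multi-packet
 * transfers, sets DMAMODE | DMAENAB (plus AUTOSET when the table there
 * allows it); mode 0 clears DMAMODE and AUTOSET and sets only DMAENAB.
 */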

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	trace_musb_urb_tx(musb, urb);
	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		musb_dbg(musb, "TX end %d stall", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			musb_dbg(musb, "TX ep%d device not responding", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
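	/* N.B.: also reached via "goto done" from the PIO sg path below */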
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			musb_dbg(musb,
				"DMA complete but FIFO not empty, CSR %04x",
				tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		musb_dbg(musb, "not complete, but DMA enabled?");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
	      (u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{
	return false;
}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	       > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif
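	/* (USE_MODE1 is normally not defined, so mode 0 is what gets used) */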

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT if when actual_length != 0,
	 * transfer_buffer_length needs to be
	 * adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{
	return false;
}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{
	return false;
}
#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	struct dma_controller	*c = musb->dma_controller;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	trace_musb_urb_rx(musb, urb);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		musb_dbg(musb, "RX end %d STALL", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_err(musb->controller, "ep%d RX three-strikes error", epnum);

		/*
		 * The three-strikes error could only happen when the USB
		 * device is not accessible, for example detached or powered
		 * off. So return the fatal error -ESHUTDOWN so hopefully the
		 * USB device drivers won't immediately resubmit the same URB.
		 */
		status = -ESHUTDOWN;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

		rx_csr &= ~MUSB_RXCSR_H_ERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			musb_dbg(musb, "RX end %d NAK timeout", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			musb_dbg(musb, "RX end %d ISO data error", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}

	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) {
			done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
			musb_dbg(hw_ep->musb,
				"ep %d dma %s, rxcsr %04x, rxcount %d",
				epnum, done ? "off" : "reset",
				musb_readw(epio, MUSB_RXCSR),
				musb_readw(epio, MUSB_RXCOUNT));
		} else {
			done = true;
		}

	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
		    musb_dma_cppi41(musb)) && dma) {
			musb_dbg(hw_ep->musb,
				"RX%d count %d, buffer 0x%llx len %d/%d",
				epnum, musb_readw(epio, MUSB_RXCOUNT),
				(unsigned long long) urb->transfer_dma
				+ urb->actual_length,
				qh->offset,
				urb->transfer_buffer_length);

			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1919 xfer_len, iso_err))
1920 goto finish;
1921 else
1922 dev_err(musb->controller, "error: rx_dma failed\n");
1923 }
1924
1925 if (!dma) {
1926 unsigned int received_len;
1927
1928 /* Unmap the buffer so that CPU can use it */
1929 usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1930
1931 /*
1932 * We need to map sg if the transfer_buffer is
1933 * NULL.
1934 */
1935 if (!urb->transfer_buffer) {
1936 qh->use_sg = true;
1937 sg_miter_start(&qh->sg_miter, urb->sg, 1,
1938 sg_flags);
1939 }
1940
1941 if (qh->use_sg) {
1942 if (!sg_miter_next(&qh->sg_miter)) {
1943 dev_err(musb->controller, "error: sg list empty\n");
1944 sg_miter_stop(&qh->sg_miter);
1945 status = -EINVAL;
1946 done = true;
1947 goto finish;
1948 }
1949 urb->transfer_buffer = qh->sg_miter.addr;
1950 received_len = urb->actual_length;
1951 qh->offset = 0x0;
1952 done = musb_host_packet_rx(musb, urb, epnum,
1953 iso_err);
1954 /* Calculate the number of bytes received */
1955 received_len = urb->actual_length -
1956 received_len;
1957 qh->sg_miter.consumed = received_len;
1958 sg_miter_stop(&qh->sg_miter);
1959 } else {
1960 done = musb_host_packet_rx(musb, urb,
1961 epnum, iso_err);
1962 }
1963 musb_dbg(musb, "read %spacket", done ? "last " : "");
1964 }
1965 }
1966
1967 finish:
1968 urb->actual_length += xfer_len;
1969 qh->offset += xfer_len;
1970 if (done) {
1971 if (qh->use_sg) {
1972 qh->use_sg = false;
1973 urb->transfer_buffer = NULL;
1974 }
1975
1976 if (urb->status == -EINPROGRESS)
1977 urb->status = status;
1978 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1979 }
1980 }
1981
1982 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1983 * the software schedule associates multiple such nodes with a given
1984 * host side hardware endpoint + direction; scheduling may activate
1985 * that hardware endpoint.
1986 */
1987 static int musb_schedule(
1988 struct musb *musb,
1989 struct musb_qh *qh,
1990 int is_in)
1991 {
1992 int idle = 0;
1993 int best_diff;
1994 int best_end, epnum;
1995 struct musb_hw_ep *hw_ep = NULL;
1996 struct list_head *head = NULL;
1997 u8 toggle;
1998 u8 txtype;
1999 struct urb *urb = next_urb(qh);
2000
2001 /* use fixed hardware for control and bulk */
2002 if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2003 head = &musb->control;
2004 hw_ep = musb->control_ep;
2005 goto success;
2006 }
2007
2008 /* else, periodic transfers get muxed to other endpoints */
2009
2010 /*
2011 * We know this qh hasn't been scheduled, so all we need to do
2012 * is choose which hardware endpoint to put it on ...
2013 *
2014 * REVISIT what we really want here is a regular schedule tree
2015 * like e.g. OHCI uses.
2016 */
2017 best_diff = 4096;
2018 best_end = -1;
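	/* Best-fit scan: among endpoints not already claimed for this
	 * direction, pick the one whose FIFO exceeds the required
	 * maxpacket * hb_mult by the smallest margin. best_diff starts
	 * at 4096, larger than any achievable surplus, so the first
	 * endpoint that fits always wins.
	 */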
2019
2020 for (epnum = 1, hw_ep = musb->endpoints + 1;
2021 epnum < musb->nr_endpoints;
2022 epnum++, hw_ep++) {
2023 int diff;
2024
2025 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2026 continue;
2027
2028 if (hw_ep == musb->bulk_ep)
2029 continue;
2030
2031 if (is_in)
2032 diff = hw_ep->max_packet_sz_rx;
2033 else
2034 diff = hw_ep->max_packet_sz_tx;
2035 diff -= (qh->maxpacket * qh->hb_mult);
2036
2037 if (diff >= 0 && best_diff > diff) {
2038
2039 /*
2040 			 * The Mentor controller has a bug: if we schedule a
2041 			 * BULK TX transfer on an endpoint that earlier handled
2042 			 * ISOC, the BULK transfer must start on a zero toggle.
2043 			 * If it starts on a 1 toggle, the transfer will fail,
2044 			 * since the controller begins the BULK transfer on a
2045 			 * 0 toggle irrespective of how the toggle bits in the
2046 			 * TXCSR register are programmed. Check for this
2047 			 * condition while allocating the EP for a TX BULK
2048 			 * transfer; if it holds, skip this EP.
2050 */
2051 hw_ep = musb->endpoints + epnum;
2052 toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2053 txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2054 >> 4) & 0x3;
2055 if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2056 toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2057 continue;
2058
2059 best_diff = diff;
2060 best_end = epnum;
2061 }
2062 }
2063 /* use bulk reserved ep1 if no other ep is free */
2064 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2065 hw_ep = musb->bulk_ep;
2066 if (is_in)
2067 head = &musb->in_bulk;
2068 else
2069 head = &musb->out_bulk;
2070
2071 		/* Enable the bulk RX/TX NAK timeout scheme when bulk requests
2072 		 * are multiplexed. This scheme doesn't work in the high-speed
2073 		 * to full-speed case, since no NAK interrupts arrive from a
2074 		 * full-speed device connected through a high-speed hub.
2075 		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for a
2076 		 * HS device and 4 (8 frames or 8 ms) for a FS device.
2077 */
2078 if (qh->dev)
2079 qh->intv_reg =
2080 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2081 goto success;
2082 } else if (best_end < 0) {
2083 dev_err(musb->controller,
2084 "%s hwep alloc failed for %dx%d\n",
2085 musb_ep_xfertype_string(qh->type),
2086 qh->hb_mult, qh->maxpacket);
2087 return -ENOSPC;
2088 }
2089
2090 idle = 1;
2091 qh->mux = 0;
2092 hw_ep = musb->endpoints + best_end;
2093 musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2094 success:
2095 if (head) {
2096 idle = list_empty(head);
2097 list_add_tail(&qh->ring, head);
2098 qh->mux = 1;
2099 }
2100 qh->hw_ep = hw_ep;
2101 qh->hep->hcpriv = qh;
2102 if (idle)
2103 musb_start_urb(musb, is_in, qh);
2104 return 0;
2105 }
2106
2107 static int musb_urb_enqueue(
2108 struct usb_hcd *hcd,
2109 struct urb *urb,
2110 gfp_t mem_flags)
2111 {
2112 unsigned long flags;
2113 struct musb *musb = hcd_to_musb(hcd);
2114 struct usb_host_endpoint *hep = urb->ep;
2115 struct musb_qh *qh;
2116 struct usb_endpoint_descriptor *epd = &hep->desc;
2117 int ret;
2118 unsigned type_reg;
2119 unsigned interval;
2120
2121 /* host role must be active */
2122 if (!is_host_active(musb) || !musb->is_active)
2123 return -ENODEV;
2124
2125 trace_musb_urb_enq(musb, urb);
2126
2127 spin_lock_irqsave(&musb->lock, flags);
2128 ret = usb_hcd_link_urb_to_ep(hcd, urb);
2129 qh = ret ? NULL : hep->hcpriv;
2130 if (qh)
2131 urb->hcpriv = qh;
2132 spin_unlock_irqrestore(&musb->lock, flags);
2133
2134 /* DMA mapping was already done, if needed, and this urb is on
2135 * hep->urb_list now ... so we're done, unless hep wasn't yet
2136 * scheduled onto a live qh.
2137 *
2138 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2139 * disabled, testing for empty qh->ring and avoiding qh setup costs
2140 * except for the first urb queued after a config change.
2141 */
2142 if (qh || ret)
2143 return ret;
2144
2145 /* Allocate and initialize qh, minimizing the work done each time
2146 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
2147 *
2148 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2149 * for bugs in other kernel code to break this driver...
2150 */
2151 qh = kzalloc(sizeof *qh, mem_flags);
2152 if (!qh) {
2153 spin_lock_irqsave(&musb->lock, flags);
2154 usb_hcd_unlink_urb_from_ep(hcd, urb);
2155 spin_unlock_irqrestore(&musb->lock, flags);
2156 return -ENOMEM;
2157 }
2158
2159 qh->hep = hep;
2160 qh->dev = urb->dev;
2161 INIT_LIST_HEAD(&qh->ring);
2162 qh->is_ready = 1;
2163
2164 qh->maxpacket = usb_endpoint_maxp(epd);
2165 qh->type = usb_endpoint_type(epd);
2166
2167 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2168 * Some musb cores don't support high bandwidth ISO transfers; and
2169 * we don't (yet!) support high bandwidth interrupt transfers.
2170 */
2171 qh->hb_mult = usb_endpoint_maxp_mult(epd);
2172 if (qh->hb_mult > 1) {
2173 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2174
2175 if (ok)
2176 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2177 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2178 if (!ok) {
2179 dev_err(musb->controller,
2180 "high bandwidth %s (%dx%d) not supported\n",
2181 musb_ep_xfertype_string(qh->type),
2182 qh->hb_mult, qh->maxpacket & 0x7ff);
2183 ret = -EMSGSIZE;
2184 goto done;
2185 }
2186 qh->maxpacket &= 0x7ff;
2187 }
2188
2189 qh->epnum = usb_endpoint_num(epd);
2190
2191 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2192 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2193
2194 /* precompute rxtype/txtype/type0 register */
2195 type_reg = (qh->type << 4) | qh->epnum;
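	/* Bits 7:6 of the {TX,RX}TYPE register select the target speed;
	 * the cases below yield 0x40 = high, 0x80 = full, 0xc0 = low.
	 */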
2196 switch (urb->dev->speed) {
2197 case USB_SPEED_LOW:
2198 type_reg |= 0xc0;
2199 break;
2200 case USB_SPEED_FULL:
2201 type_reg |= 0x80;
2202 break;
2203 default:
2204 type_reg |= 0x40;
2205 }
2206 qh->type_reg = type_reg;
2207
2208 /* Precompute RXINTERVAL/TXINTERVAL register */
2209 switch (qh->type) {
2210 case USB_ENDPOINT_XFER_INT:
2211 /*
2212 * Full/low speeds use the linear encoding,
2213 * high speed uses the logarithmic encoding.
2214 */
2215 if (urb->dev->speed <= USB_SPEED_FULL) {
2216 interval = max_t(u8, epd->bInterval, 1);
2217 break;
2218 }
2219 fallthrough;
2220 case USB_ENDPOINT_XFER_ISOC:
2221 /* ISO always uses logarithmic encoding */
2222 interval = min_t(u8, epd->bInterval, 16);
2223 break;
2224 default:
2225 /* REVISIT we actually want to use NAK limits, hinting to the
2226 * transfer scheduling logic to try some other qh, e.g. try
2227 * for 2 msec first:
2228 *
2229 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2230 *
2231 * The downside of disabling this is that transfer scheduling
2232 * gets VERY unfair for nonperiodic transfers; a misbehaving
2233 * peripheral could make that hurt. That's perfectly normal
2234 * for reads from network or serial adapters ... so we have
2235 * partial NAKlimit support for bulk RX.
2236 *
2237 * The upside of disabling it is simpler transfer scheduling.
2238 */
2239 interval = 0;
2240 }
2241 qh->intv_reg = interval;
2242
2243 /* precompute addressing for external hub/tt ports */
2244 if (musb->is_multipoint) {
2245 struct usb_device *parent = urb->dev->parent;
2246
2247 if (parent != hcd->self.root_hub) {
2248 qh->h_addr_reg = (u8) parent->devnum;
2249
2250 /* set up tt info if needed */
2251 if (urb->dev->tt) {
2252 qh->h_port_reg = (u8) urb->dev->ttport;
2253 if (urb->dev->tt->hub)
2254 qh->h_addr_reg =
2255 (u8) urb->dev->tt->hub->devnum;
2256 if (urb->dev->tt->multi)
2257 qh->h_addr_reg |= 0x80;
2258 }
2259 }
2260 }
2261
2262 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2263 * until we get real dma queues (with an entry for each urb/buffer),
2264 * we only have work to do in the former case.
2265 */
2266 spin_lock_irqsave(&musb->lock, flags);
2267 if (hep->hcpriv || !next_urb(qh)) {
2268 /* some concurrent activity submitted another urb to hep...
2269 * odd, rare, error prone, but legal.
2270 */
2271 kfree(qh);
2272 qh = NULL;
2273 ret = 0;
2274 } else
2275 ret = musb_schedule(musb, qh,
2276 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2277
2278 if (ret == 0) {
2279 urb->hcpriv = qh;
2280 /* FIXME set urb->start_frame for iso/intr, it's tested in
2281 * musb_start_urb(), but otherwise only konicawc cares ...
2282 */
2283 }
2284 spin_unlock_irqrestore(&musb->lock, flags);
2285
2286 done:
2287 if (ret != 0) {
2288 spin_lock_irqsave(&musb->lock, flags);
2289 usb_hcd_unlink_urb_from_ep(hcd, urb);
2290 spin_unlock_irqrestore(&musb->lock, flags);
2291 kfree(qh);
2292 }
2293 return ret;
2294 }
2295
2297 /*
2298 * Abort a transfer that's at the head of a hardware queue.
2299 * Called with the controller locked and irqs blocked.
2300 * The hardware queue advances to the next transfer, unless prevented.
2301 */
2302 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2303 {
2304 struct musb_hw_ep *ep = qh->hw_ep;
2305 struct musb *musb = ep->musb;
2306 void __iomem *epio = ep->regs;
2307 unsigned hw_end = ep->epnum;
2308 void __iomem *regs = ep->musb->mregs;
2309 int is_in = usb_pipein(urb->pipe);
2310 int status = 0;
2311 u16 csr;
2312 struct dma_channel *dma = NULL;
2313
2314 musb_ep_select(regs, hw_end);
2315
2316 if (is_dma_capable()) {
2317 dma = is_in ? ep->rx_channel : ep->tx_channel;
2318 if (dma) {
2319 status = ep->musb->dma_controller->channel_abort(dma);
2320 musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2321 is_in ? 'R' : 'T', ep->epnum,
2322 urb, status);
2323 urb->actual_length += dma->actual_len;
2324 }
2325 }
2326
2327 /* turn off DMA requests, discard state, stop polling ... */
2328 if (ep->epnum && is_in) {
2329 /* giveback saves bulk toggle */
2330 csr = musb_h_flush_rxfifo(ep, 0);
2331
2332 /* clear the endpoint's irq status here to avoid bogus irqs */
2333 if (is_dma_capable() && dma)
2334 musb_platform_clear_ep_rxintr(musb, ep->epnum);
2335 } else if (ep->epnum) {
2336 musb_h_tx_flush_fifo(ep);
2337 csr = musb_readw(epio, MUSB_TXCSR);
2338 csr &= ~(MUSB_TXCSR_AUTOSET
2339 | MUSB_TXCSR_DMAENAB
2340 | MUSB_TXCSR_H_RXSTALL
2341 | MUSB_TXCSR_H_NAKTIMEOUT
2342 | MUSB_TXCSR_H_ERROR
2343 | MUSB_TXCSR_TXPKTRDY);
2344 musb_writew(epio, MUSB_TXCSR, csr);
2345 /* REVISIT may need to clear FLUSHFIFO ... */
2346 musb_writew(epio, MUSB_TXCSR, csr);
2347 /* flush cpu writebuffer */
2348 csr = musb_readw(epio, MUSB_TXCSR);
2349 } else {
2350 musb_h_ep0_flush_fifo(ep);
2351 }
2352 if (status == 0)
2353 musb_advance_schedule(ep->musb, urb, ep, is_in);
2354 return status;
2355 }
2356
2357 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2358 {
2359 struct musb *musb = hcd_to_musb(hcd);
2360 struct musb_qh *qh;
2361 unsigned long flags;
2362 int is_in = usb_pipein(urb->pipe);
2363 int ret;
2364
2365 trace_musb_urb_deq(musb, urb);
2366
2367 spin_lock_irqsave(&musb->lock, flags);
2368 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2369 if (ret)
2370 goto done;
2371
2372 qh = urb->hcpriv;
2373 if (!qh)
2374 goto done;
2375
2376 /*
2377 * Any URB not actively programmed into endpoint hardware can be
2378 * immediately given back; that's any URB not at the head of an
2379 * endpoint queue, unless someday we get real DMA queues. And even
2380 * if it's at the head, it might not be known to the hardware...
2381 *
2382 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2383 * has already been updated. This is a synchronous abort; it'd be
2384 * OK to hold off until after some IRQ, though.
2385 *
2386 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2387 */
2388 if (!qh->is_ready
2389 || urb->urb_list.prev != &qh->hep->urb_list
2390 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2391 int ready = qh->is_ready;
2392
2393 qh->is_ready = 0;
2394 musb_giveback(musb, urb, 0);
2395 qh->is_ready = ready;
2396
2397 /* If nothing else (usually musb_giveback) is using it
2398 * and its URB list has emptied, recycle this qh.
2399 */
2400 if (ready && list_empty(&qh->hep->urb_list)) {
2401 qh->hep->hcpriv = NULL;
2402 list_del(&qh->ring);
2403 kfree(qh);
2404 }
2405 } else
2406 ret = musb_cleanup_urb(urb, qh);
2407 done:
2408 spin_unlock_irqrestore(&musb->lock, flags);
2409 return ret;
2410 }
2411
2412 /* disable an endpoint */
2413 static void
2414 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2415 {
2416 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2417 unsigned long flags;
2418 struct musb *musb = hcd_to_musb(hcd);
2419 struct musb_qh *qh;
2420 struct urb *urb;
2421
2422 spin_lock_irqsave(&musb->lock, flags);
2423
2424 qh = hep->hcpriv;
2425 if (qh == NULL)
2426 goto exit;
2427
2428 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2429
2430 /* Kick the first URB off the hardware, if needed */
2431 qh->is_ready = 0;
2432 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2433 urb = next_urb(qh);
2434
2435 /* make software (then hardware) stop ASAP */
2436 if (!urb->unlinked)
2437 urb->status = -ESHUTDOWN;
2438
2439 /* cleanup */
2440 musb_cleanup_urb(urb, qh);
2441
2442 /* Then nuke all the others ... and advance the
2443 * queue on hw_ep (e.g. bulk ring) when we're done.
2444 */
2445 while (!list_empty(&hep->urb_list)) {
2446 urb = next_urb(qh);
2447 urb->status = -ESHUTDOWN;
2448 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2449 }
2450 } else {
2451 /* Just empty the queue; the hardware is busy with
2452 * other transfers, and since !qh->is_ready nothing
2453 * will activate any of these as it advances.
2454 */
2455 while (!list_empty(&hep->urb_list))
2456 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2457
2458 hep->hcpriv = NULL;
2459 list_del(&qh->ring);
2460 kfree(qh);
2461 }
2462 exit:
2463 spin_unlock_irqrestore(&musb->lock, flags);
2464 }
2465
2466 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2467 {
2468 struct musb *musb = hcd_to_musb(hcd);
2469
2470 return musb_readw(musb->mregs, MUSB_FRAME);
2471 }
2472
2473 static int musb_h_start(struct usb_hcd *hcd)
2474 {
2475 struct musb *musb = hcd_to_musb(hcd);
2476
2477 /* NOTE: musb_start() is called when the hub driver turns
2478 * on port power, or when (OTG) peripheral starts.
2479 */
2480 hcd->state = HC_STATE_RUNNING;
2481 musb->port1_status = 0;
2482 return 0;
2483 }
2484
2485 static void musb_h_stop(struct usb_hcd *hcd)
2486 {
2487 musb_stop(hcd_to_musb(hcd));
2488 hcd->state = HC_STATE_HALT;
2489 }
2490
2491 static int musb_bus_suspend(struct usb_hcd *hcd)
2492 {
2493 struct musb *musb = hcd_to_musb(hcd);
2494 u8 devctl;
2495 int ret;
2496
2497 ret = musb_port_suspend(musb, true);
2498 if (ret)
2499 return ret;
2500
2501 if (!is_host_active(musb))
2502 return 0;
2503
2504 switch (musb->xceiv->otg->state) {
2505 case OTG_STATE_A_SUSPEND:
2506 return 0;
2507 case OTG_STATE_A_WAIT_VRISE:
2508 /* ID could be grounded even if there's no device
2509 * on the other end of the cable. NOTE that the
2510 * A_WAIT_VRISE timers are messy with MUSB...
2511 */
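		/* The 2-bit MUSB_DEVCTL_VBUS field at its maximum value
		 * means VBUS is above the VBUS-valid threshold, so a
		 * session is possible; wait for a connect (A_WAIT_BCON).
		 */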
2512 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2513 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2514 musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2515 break;
2516 default:
2517 break;
2518 }
2519
2520 if (musb->is_active) {
2521 WARNING("trying to suspend as %s while active\n",
2522 usb_otg_state_string(musb->xceiv->otg->state));
2523 return -EBUSY;
2524 } else
2525 return 0;
2526 }
2527
2528 static int musb_bus_resume(struct usb_hcd *hcd)
2529 {
2530 struct musb *musb = hcd_to_musb(hcd);
2531
2532 if (musb->config &&
2533 musb->config->host_port_deassert_reset_at_resume)
2534 musb_port_reset(musb, false);
2535
2536 return 0;
2537 }
2538
2539 #ifndef CONFIG_MUSB_PIO_ONLY
2540
2541 #define MUSB_USB_DMA_ALIGN 4
2542
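/*
 * Bounce buffer for DMA when urb->transfer_buffer isn't suitably
 * aligned: kmalloc_ptr holds the raw allocation, old_xfer_buffer
 * the caller's original buffer, and data[] is the aligned region
 * actually handed to the hardware.
 */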
2543 struct musb_temp_buffer {
2544 void *kmalloc_ptr;
2545 void *old_xfer_buffer;
2546 u8 data[];
2547 };
2548
2549 static void musb_free_temp_buffer(struct urb *urb)
2550 {
2551 enum dma_data_direction dir;
2552 struct musb_temp_buffer *temp;
2553 size_t length;
2554
2555 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2556 return;
2557
2558 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2559
2560 temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2561 data);
2562
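	/* For IN transfers, copy the received data back into the
	 * caller's original buffer before restoring it.
	 */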
2563 if (dir == DMA_FROM_DEVICE) {
2564 if (usb_pipeisoc(urb->pipe))
2565 length = urb->transfer_buffer_length;
2566 else
2567 length = urb->actual_length;
2568
2569 memcpy(temp->old_xfer_buffer, temp->data, length);
2570 }
2571 urb->transfer_buffer = temp->old_xfer_buffer;
2572 kfree(temp->kmalloc_ptr);
2573
2574 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2575 }
2576
2577 static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2578 {
2579 enum dma_data_direction dir;
2580 struct musb_temp_buffer *temp;
2581 void *kmalloc_ptr;
2582 size_t kmalloc_size;
2583
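	/* Nothing to do for scatter-gather URBs, zero-length transfers,
	 * or buffers already aligned to MUSB_USB_DMA_ALIGN.
	 */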
2584 if (urb->num_sgs || urb->sg ||
2585 urb->transfer_buffer_length == 0 ||
2586 !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2587 return 0;
2588
2589 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2590
2591 /* Allocate a buffer with enough padding for alignment */
2592 kmalloc_size = urb->transfer_buffer_length +
2593 sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2594
2595 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2596 if (!kmalloc_ptr)
2597 return -ENOMEM;
2598
2599 	/* Position our struct musb_temp_buffer such that data is aligned */
2600 temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2601
2603 temp->kmalloc_ptr = kmalloc_ptr;
2604 temp->old_xfer_buffer = urb->transfer_buffer;
2605 if (dir == DMA_TO_DEVICE)
2606 memcpy(temp->data, urb->transfer_buffer,
2607 urb->transfer_buffer_length);
2608 urb->transfer_buffer = temp->data;
2609
2610 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2611
2612 return 0;
2613 }
2614
2615 static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2616 gfp_t mem_flags)
2617 {
2618 struct musb *musb = hcd_to_musb(hcd);
2619 int ret;
2620
2621 /*
2622 	 * The DMA engine in RTL 1.8 and above cannot handle
2623 	 * DMA addresses that are not aligned to a 4-byte boundary.
2624 	 * For such engines we implement the (un)map_urb_for_dma
2625 	 * hooks. Do not use these hooks for RTL < 1.8.
2626 */
2627 if (musb->hwvers < MUSB_HWVERS_1800)
2628 return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2629
2630 ret = musb_alloc_temp_buffer(urb, mem_flags);
2631 if (ret)
2632 return ret;
2633
2634 ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2635 if (ret)
2636 musb_free_temp_buffer(urb);
2637
2638 return ret;
2639 }
2640
2641 static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2642 {
2643 struct musb *musb = hcd_to_musb(hcd);
2644
2645 usb_hcd_unmap_urb_for_dma(hcd, urb);
2646
2647 	/* Do not use this hook for RTL < 1.8 (see description above) */
2648 if (musb->hwvers < MUSB_HWVERS_1800)
2649 return;
2650
2651 musb_free_temp_buffer(urb);
2652 }
2653 #endif /* !CONFIG_MUSB_PIO_ONLY */
2654
2655 static const struct hc_driver musb_hc_driver = {
2656 .description = "musb-hcd",
2657 .product_desc = "MUSB HDRC host driver",
2658 .hcd_priv_size = sizeof(struct musb *),
2659 .flags = HCD_USB2 | HCD_DMA | HCD_MEMORY,
2660
2661 /* not using irq handler or reset hooks from usbcore, since
2662 * those must be shared with peripheral code for OTG configs
2663 */
2664
2665 .start = musb_h_start,
2666 .stop = musb_h_stop,
2667
2668 .get_frame_number = musb_h_get_frame_number,
2669
2670 .urb_enqueue = musb_urb_enqueue,
2671 .urb_dequeue = musb_urb_dequeue,
2672 .endpoint_disable = musb_h_disable,
2673
2674 #ifndef CONFIG_MUSB_PIO_ONLY
2675 .map_urb_for_dma = musb_map_urb_for_dma,
2676 .unmap_urb_for_dma = musb_unmap_urb_for_dma,
2677 #endif
2678
2679 .hub_status_data = musb_hub_status_data,
2680 .hub_control = musb_hub_control,
2681 .bus_suspend = musb_bus_suspend,
2682 .bus_resume = musb_bus_resume,
2683 /* .start_port_reset = NULL, */
2684 /* .hub_irq_enable = NULL, */
2685 };
2686
2687 int musb_host_alloc(struct musb *musb)
2688 {
2689 struct device *dev = musb->controller;
2690
2691 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2692 musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2693 if (!musb->hcd)
2694 return -EINVAL;
2695
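	/* Stash the musb pointer where hcd_to_musb() can retrieve it
	 * from any hcd callback; hcd_priv_size reserves just enough
	 * room for it.
	 */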
2696 *musb->hcd->hcd_priv = (unsigned long) musb;
2697 musb->hcd->self.uses_pio_for_control = 1;
2698 musb->hcd->uses_new_polling = 1;
2699 musb->hcd->has_tt = 1;
2700
2701 return 0;
2702 }
2703
2704 void musb_host_cleanup(struct musb *musb)
2705 {
2706 if (musb->port_mode == MUSB_PERIPHERAL)
2707 return;
2708 usb_remove_hcd(musb->hcd);
2709 }
2710
2711 void musb_host_free(struct musb *musb)
2712 {
2713 usb_put_hcd(musb->hcd);
2714 }
2715
2716 int musb_host_setup(struct musb *musb, int power_budget)
2717 {
2718 int ret;
2719 struct usb_hcd *hcd = musb->hcd;
2720
2721 if (musb->port_mode == MUSB_HOST) {
2722 MUSB_HST_MODE(musb);
2723 musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2724 }
2725 otg_set_host(musb->xceiv->otg, &hcd->self);
2726 /* don't support otg protocols */
2727 hcd->self.otg_port = 0;
2728 musb->xceiv->otg->host = &hcd->self;
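	/* The platform supplies power_budget in 2 mA units (like
	 * bMaxPower); usbcore expects mA. A zero budget defaults to
	 * 250, i.e. the standard 500 mA port.
	 */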
2729 hcd->power_budget = 2 * (power_budget ? : 250);
2730 hcd->skip_phy_initialization = 1;
2731
2732 ret = usb_add_hcd(hcd, 0, 0);
2733 if (ret < 0)
2734 return ret;
2735
2736 device_wakeup_enable(hcd->self.controller);
2737 return 0;
2738 }
2739
2740 void musb_host_resume_root_hub(struct musb *musb)
2741 {
2742 usb_hcd_resume_root_hub(musb->hcd);
2743 }
2744
2745 void musb_host_poke_root_hub(struct musb *musb)
2746 {
2747 MUSB_HST_MODE(musb);
2748 if (musb->hcd->status_urb)
2749 usb_hcd_poll_rh_status(musb->hcd);
2750 else
2751 usb_hcd_resume_root_hub(musb->hcd);
2752 }
2753