1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2, as published by the Free
24 * Software Foundation.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/delay.h>
41 #include <linux/slab.h>
42 #include <linux/spinlock.h>
43 #include <linux/platform_device.h>
44 #include <linux/pm_runtime.h>
45 #include <linux/interrupt.h>
46 #include <linux/io.h>
47 #include <linux/list.h>
48 #include <linux/dma-mapping.h>
49
50 #include <linux/usb/ch9.h>
51 #include <linux/usb/gadget.h>
52
53 #include "core.h"
54 #include "gadget.h"
55 #include "io.h"
56
57 /**
58 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
59 * @dwc: pointer to our context structure
60 * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
61 *
62 * Caller should take care of locking. This function will
63 * return 0 on success or -EINVAL if an invalid Test Selector
64 * is passed.
65 */
66 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
67 {
68 u32 reg;
69
70 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
71 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
72
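/*
 * Note: the USB2 test selector values from ch9.h (TEST_J == 1 through
 * TEST_FORCE_EN == 5) map directly onto the DCTL.TstCtl field, which
 * starts at bit 1 -- hence the "mode << 1" below.
 */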
73 switch (mode) {
74 case TEST_J:
75 case TEST_K:
76 case TEST_SE0_NAK:
77 case TEST_PACKET:
78 case TEST_FORCE_EN:
79 reg |= mode << 1;
80 break;
81 default:
82 return -EINVAL;
83 }
84
85 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
86
87 return 0;
88 }
89
90 /**
91 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
92 * @dwc: pointer to our context structure
93 * @state: the state to put link into
94 *
95 * Caller should take care of locking. This function will
96 * return 0 on success or -ETIMEDOUT.
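 *
 * A caller such as dwc3_gadget_wakeup() below uses this to drive the
 * link into Recovery (DWC3_LINK_STATE_RECOV) for remote wakeup.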
97 */
98 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
99 {
100 int retries = 10000;
101 u32 reg;
102
103 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
104 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
105
106 /* set requested state */
107 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
108 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
109
110 /* wait for a change in DSTS */
111 while (--retries) {
112 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
113
114 if (DWC3_DSTS_USBLNKST(reg) == state)
115 return 0;
116
117 udelay(5);
118 }
119
120 dev_vdbg(dwc->dev, "link state change request timed out\n");
121
122 return -ETIMEDOUT;
123 }
124
125 /**
126 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
127 * @dwc: pointer to our context structure
128 *
129 * This function will do a best effort FIFO allocation in order
130 * to improve FIFO usage and throughput, while still allowing
131 * us to enable as many endpoints as possible.
132 *
133 * Keep in mind that this operation will be highly dependent
134 * on the configured size for RAM1 - which contains TxFifo -,
135 * the number of endpoints enabled in the coreConsultant tool, and
136 * the width of the Master Bus.
137 *
138 * In the ideal world, we would always be able to satisfy the
139 * following equation:
140 *
141 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
142 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
143 *
144 * Unfortunately, due to many variables that's not always the case.
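 *
 * As a purely illustrative example: assuming a (hypothetical) 64-bit
 * master bus (MDWIDTH-Bytes == 8) and 4 IN endpoints, the equation
 * above asks for ((512 + 16) + 3 * (3 * 1032 + 8)) / 8 = 1230
 * MDWIDTH-words of RAM1, which may well exceed what the core was
 * configured with.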
145 */
146 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
147 {
148 int last_fifo_depth = 0;
149 int ram1_depth;
150 int fifo_size;
151 int mdwidth;
152 int num;
153
154 if (!dwc->needs_fifo_resize)
155 return 0;
156
157 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
158 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
159
160 /* MDWIDTH is represented in bits, we need it in bytes */
161 mdwidth >>= 3;
162
163 /*
164 * FIXME For now we will only allocate 1 wMaxPacketSize space
165 * for each enabled endpoint, later patches will come to
166 * improve this algorithm so that we better use the internal
167 * FIFO space
168 */
169 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
170 struct dwc3_ep *dep = dwc->eps[num];
171 int fifo_number = dep->number >> 1;
172 int mult = 1;
173 int tmp;
174
175 if (!(dep->number & 1))
176 continue;
177
178 if (!(dep->flags & DWC3_EP_ENABLED))
179 continue;
180
181 if (usb_endpoint_xfer_bulk(dep->desc)
182 || usb_endpoint_xfer_isoc(dep->desc))
183 mult = 3;
184
185 /*
186 * REVISIT: the following assumes we will always have enough
187 * space available on the FIFO RAM for all possible use cases.
188 * Make sure that's true somehow and change FIFO allocation
189 * accordingly.
190 *
191 * If we have Bulk or Isochronous endpoints, we want
192 * them to be able to be very, very fast. So we're giving
193 * those endpoints a fifo_size which is enough for 3 full
194 * packets
195 */
196 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
197 tmp += mdwidth;
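/*
 * Illustrative example (hypothetical values): a bulk endpoint with
 * maxpacket 1024 on a bus with an mdwidth of 8 bytes gives
 * tmp = 3 * (1024 + 8) + 8 = 3104 bytes, i.e. a fifo_size of
 * DIV_ROUND_UP(3104, 8) = 388 mdwidth-words.
 */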
198
199 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
200
201 fifo_size |= (last_fifo_depth << 16);
202
203 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
204 dep->name, last_fifo_depth, fifo_size & 0xffff);
205
206 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
207 fifo_size);
208
209 last_fifo_depth += (fifo_size & 0xffff);
210 }
211
212 return 0;
213 }
214
215 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
216 int status)
217 {
218 struct dwc3 *dwc = dep->dwc;
219
220 if (req->queued) {
221 if (req->request.num_mapped_sgs)
222 dep->busy_slot += req->request.num_mapped_sgs;
223 else
224 dep->busy_slot++;
225
226 /*
227 * Skip LINK TRB. We can't use req->trb and check for
228 * DWC3_TRBCTL_LINK_TRB because it points the TRB we just
229 * completed (not the LINK TRB).
230 */
231 if (((dep->busy_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
232 usb_endpoint_xfer_isoc(dep->desc))
233 dep->busy_slot++;
234 }
235 list_del(&req->list);
236 req->trb = NULL;
237
238 if (req->request.status == -EINPROGRESS)
239 req->request.status = status;
240
241 if (dwc->ep0_bounced && dep->number == 0)
242 dwc->ep0_bounced = false;
243 else
244 usb_gadget_unmap_request(&dwc->gadget, &req->request,
245 req->direction);
246
247 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
248 req, dep->name, req->request.actual,
249 req->request.length, status);
250
251 spin_unlock(&dwc->lock);
252 req->request.complete(&dep->endpoint, &req->request);
253 spin_lock(&dwc->lock);
254 }
255
256 static const char *dwc3_gadget_ep_cmd_string(u8 cmd)
257 {
258 switch (cmd) {
259 case DWC3_DEPCMD_DEPSTARTCFG:
260 return "Start New Configuration";
261 case DWC3_DEPCMD_ENDTRANSFER:
262 return "End Transfer";
263 case DWC3_DEPCMD_UPDATETRANSFER:
264 return "Update Transfer";
265 case DWC3_DEPCMD_STARTTRANSFER:
266 return "Start Transfer";
267 case DWC3_DEPCMD_CLEARSTALL:
268 return "Clear Stall";
269 case DWC3_DEPCMD_SETSTALL:
270 return "Set Stall";
271 case DWC3_DEPCMD_GETSEQNUMBER:
272 return "Get Data Sequence Number";
273 case DWC3_DEPCMD_SETTRANSFRESOURCE:
274 return "Set Endpoint Transfer Resource";
275 case DWC3_DEPCMD_SETEPCONFIG:
276 return "Set Endpoint Configuration";
277 default:
278 return "UNKNOWN command";
279 }
280 }
281
282 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
283 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
284 {
285 struct dwc3_ep *dep = dwc->eps[ep];
286 u32 timeout = 500;
287 u32 reg;
288
289 dev_vdbg(dwc->dev, "%s: cmd '%s' params %08x %08x %08x\n",
290 dep->name,
291 dwc3_gadget_ep_cmd_string(cmd), params->param0,
292 params->param1, params->param2);
293
294 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
295 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
296 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
297
298 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
299 do {
300 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
301 if (!(reg & DWC3_DEPCMD_CMDACT)) {
302 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
303 DWC3_DEPCMD_STATUS(reg));
304 return 0;
305 }
306
307 /*
308 * We can't sleep here, because it is also called from
309 * interrupt context.
310 */
311 timeout--;
312 if (!timeout)
313 return -ETIMEDOUT;
314
315 udelay(1);
316 } while (1);
317 }
318
319 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
320 struct dwc3_trb *trb)
321 {
322 u32 offset = (char *) trb - (char *) dep->trb_pool;
323
324 return dep->trb_pool_dma + offset;
325 }
326
327 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
328 {
329 struct dwc3 *dwc = dep->dwc;
330
331 if (dep->trb_pool)
332 return 0;
333
334 if (dep->number == 0 || dep->number == 1)
335 return 0;
336
337 dep->trb_pool = dma_alloc_coherent(dwc->dev,
338 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
339 &dep->trb_pool_dma, GFP_KERNEL);
340 if (!dep->trb_pool) {
341 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
342 dep->name);
343 return -ENOMEM;
344 }
345
346 return 0;
347 }
348
349 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
350 {
351 struct dwc3 *dwc = dep->dwc;
352
353 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
354 dep->trb_pool, dep->trb_pool_dma);
355
356 dep->trb_pool = NULL;
357 dep->trb_pool_dma = 0;
358 }
359
360 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
361 {
362 struct dwc3_gadget_ep_cmd_params params;
363 u32 cmd;
364
365 memset(&params, 0x00, sizeof(params));
366
367 if (dep->number != 1) {
368 cmd = DWC3_DEPCMD_DEPSTARTCFG;
369 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
370 if (dep->number > 1) {
371 if (dwc->start_config_issued)
372 return 0;
373 dwc->start_config_issued = true;
374 cmd |= DWC3_DEPCMD_PARAM(2);
375 }
376
377 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
378 }
379
380 return 0;
381 }
382
383 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
384 const struct usb_endpoint_descriptor *desc,
385 const struct usb_ss_ep_comp_descriptor *comp_desc)
386 {
387 struct dwc3_gadget_ep_cmd_params params;
388
389 memset(&params, 0x00, sizeof(params));
390
391 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
392 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc))
393 | DWC3_DEPCFG_BURST_SIZE(dep->endpoint.maxburst);
394
395 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
396 | DWC3_DEPCFG_XFER_NOT_READY_EN;
397
398 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
399 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
400 | DWC3_DEPCFG_STREAM_EVENT_EN;
401 dep->stream_capable = true;
402 }
403
404 if (usb_endpoint_xfer_isoc(desc))
405 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
406
407 /*
408 * We are doing 1:1 mapping for endpoints, meaning
409 * Physical Endpoint 2 maps to Logical Endpoint 2 and
410 * so on. We consider the direction bit as part of the physical
411 * endpoint number. So USB endpoint 0x81 is 0x03.
412 */
413 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
414
415 /*
416 * We must use the lower 16 TX FIFOs even though
417 * HW might have more
418 */
419 if (dep->direction)
420 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
421
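/*
 * bInterval is an exponent for high-speed/SuperSpeed periodic
 * endpoints: a (hypothetical) isochronous endpoint with bInterval == 4
 * is serviced every 2^(4 - 1) == 8 (micro)frames, which is what
 * dep->interval ends up holding below.
 */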
422 if (desc->bInterval) {
423 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
424 dep->interval = 1 << (desc->bInterval - 1);
425 }
426
427 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
428 DWC3_DEPCMD_SETEPCONFIG, &params);
429 }
430
431 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
432 {
433 struct dwc3_gadget_ep_cmd_params params;
434
435 memset(&params, 0x00, sizeof(params));
436
437 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
438
439 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
440 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
441 }
442
443 /**
444 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
445 * @dep: endpoint to be initialized
446 * @desc: USB Endpoint Descriptor
447 *
448 * Caller should take care of locking
449 */
450 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
451 const struct usb_endpoint_descriptor *desc,
452 const struct usb_ss_ep_comp_descriptor *comp_desc)
453 {
454 struct dwc3 *dwc = dep->dwc;
455 u32 reg;
456 int ret = -ENOMEM;
457
458 if (!(dep->flags & DWC3_EP_ENABLED)) {
459 ret = dwc3_gadget_start_config(dwc, dep);
460 if (ret)
461 return ret;
462 }
463
464 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
465 if (ret)
466 return ret;
467
468 if (!(dep->flags & DWC3_EP_ENABLED)) {
469 struct dwc3_trb *trb_st_hw;
470 struct dwc3_trb *trb_link;
471
472 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
473 if (ret)
474 return ret;
475
476 dep->desc = desc;
477 dep->comp_desc = comp_desc;
478 dep->type = usb_endpoint_type(desc);
479 dep->flags |= DWC3_EP_ENABLED;
480
481 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
482 reg |= DWC3_DALEPENA_EP(dep->number);
483 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
484
485 if (!usb_endpoint_xfer_isoc(desc))
486 return 0;
487
488 /* Link TRB for ISOC. The HWO bit is never reset */
489 trb_st_hw = &dep->trb_pool[0];
490 
491 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
492 memset(trb_link, 0, sizeof(*trb_link));
494
495 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
496 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
497 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
498 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
499 }
500
501 return 0;
502 }
503
504 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum);
505 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
506 {
507 struct dwc3_request *req;
508
509 if (!list_empty(&dep->req_queued))
510 dwc3_stop_active_transfer(dwc, dep->number);
511
512 while (!list_empty(&dep->request_list)) {
513 req = next_request(&dep->request_list);
514
515 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
516 }
517 }
518
519 /**
520 * __dwc3_gadget_ep_disable - Disables a HW endpoint
521 * @dep: the endpoint to disable
522 *
523 * This function also removes requests which are currently processed by the
524 * hardware and those which are not yet scheduled.
525 * Caller should take care of locking.
526 */
527 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
528 {
529 struct dwc3 *dwc = dep->dwc;
530 u32 reg;
531
532 dwc3_remove_requests(dwc, dep);
533
534 /* make sure HW endpoint isn't stalled */
535 if (dep->flags & DWC3_EP_STALL)
536 __dwc3_gadget_ep_set_halt(dep, 0);
537
538 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
539 reg &= ~DWC3_DALEPENA_EP(dep->number);
540 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
541
542 dep->stream_capable = false;
543 dep->desc = NULL;
544 dep->endpoint.desc = NULL;
545 dep->comp_desc = NULL;
546 dep->type = 0;
547 dep->flags = 0;
548
549 return 0;
550 }
551
552 /* -------------------------------------------------------------------------- */
553
554 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
555 const struct usb_endpoint_descriptor *desc)
556 {
557 return -EINVAL;
558 }
559
560 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
561 {
562 return -EINVAL;
563 }
564
565 /* -------------------------------------------------------------------------- */
566
567 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
568 const struct usb_endpoint_descriptor *desc)
569 {
570 struct dwc3_ep *dep;
571 struct dwc3 *dwc;
572 unsigned long flags;
573 int ret;
574
575 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
576 pr_debug("dwc3: invalid parameters\n");
577 return -EINVAL;
578 }
579
580 if (!desc->wMaxPacketSize) {
581 pr_debug("dwc3: missing wMaxPacketSize\n");
582 return -EINVAL;
583 }
584
585 dep = to_dwc3_ep(ep);
586 dwc = dep->dwc;
587
588 switch (usb_endpoint_type(desc)) {
589 case USB_ENDPOINT_XFER_CONTROL:
590 strlcat(dep->name, "-control", sizeof(dep->name));
591 break;
592 case USB_ENDPOINT_XFER_ISOC:
593 strlcat(dep->name, "-isoc", sizeof(dep->name));
594 break;
595 case USB_ENDPOINT_XFER_BULK:
596 strlcat(dep->name, "-bulk", sizeof(dep->name));
597 break;
598 case USB_ENDPOINT_XFER_INT:
599 strlcat(dep->name, "-int", sizeof(dep->name));
600 break;
601 default:
602 dev_err(dwc->dev, "invalid endpoint transfer type\n");
603 }
604
605 if (dep->flags & DWC3_EP_ENABLED) {
606 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
607 dep->name);
608 return 0;
609 }
610
611 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
612
613 spin_lock_irqsave(&dwc->lock, flags);
614 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
615 spin_unlock_irqrestore(&dwc->lock, flags);
616
617 return ret;
618 }
619
620 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
621 {
622 struct dwc3_ep *dep;
623 struct dwc3 *dwc;
624 unsigned long flags;
625 int ret;
626
627 if (!ep) {
628 pr_debug("dwc3: invalid parameters\n");
629 return -EINVAL;
630 }
631
632 dep = to_dwc3_ep(ep);
633 dwc = dep->dwc;
634
635 if (!(dep->flags & DWC3_EP_ENABLED)) {
636 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
637 dep->name);
638 return 0;
639 }
640
641 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
642 dep->number >> 1,
643 (dep->number & 1) ? "in" : "out");
644
645 spin_lock_irqsave(&dwc->lock, flags);
646 ret = __dwc3_gadget_ep_disable(dep);
647 spin_unlock_irqrestore(&dwc->lock, flags);
648
649 return ret;
650 }
651
652 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
653 gfp_t gfp_flags)
654 {
655 struct dwc3_request *req;
656 struct dwc3_ep *dep = to_dwc3_ep(ep);
657 struct dwc3 *dwc = dep->dwc;
658
659 req = kzalloc(sizeof(*req), gfp_flags);
660 if (!req) {
661 dev_err(dwc->dev, "not enough memory\n");
662 return NULL;
663 }
664
665 req->epnum = dep->number;
666 req->dep = dep;
667
668 return &req->request;
669 }
670
671 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
672 struct usb_request *request)
673 {
674 struct dwc3_request *req = to_dwc3_request(request);
675
676 kfree(req);
677 }
678
679 /**
680 * dwc3_prepare_one_trb - setup one TRB from one request
681 * @dep: endpoint for which this request is prepared
682 * @req: dwc3_request pointer
683 */
684 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
685 struct dwc3_request *req, dma_addr_t dma,
686 unsigned length, unsigned last, unsigned chain)
687 {
688 struct dwc3 *dwc = dep->dwc;
689 struct dwc3_trb *trb;
690
691 unsigned int cur_slot;
692
693 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
694 dep->name, req, (unsigned long long) dma,
695 length, last ? " last" : "",
696 chain ? " chain" : "");
697
698 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
699 cur_slot = dep->free_slot;
700 dep->free_slot++;
701
702 /* Skip the LINK-TRB on ISOC */
703 if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
704 usb_endpoint_xfer_isoc(dep->desc))
705 return;
706
707 if (!req->trb) {
708 dwc3_gadget_move_request_queued(req);
709 req->trb = trb;
710 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
711 }
712
713 trb->size = DWC3_TRB_SIZE_LENGTH(length);
714 trb->bpl = lower_32_bits(dma);
715 trb->bph = upper_32_bits(dma);
716
717 switch (usb_endpoint_type(dep->desc)) {
718 case USB_ENDPOINT_XFER_CONTROL:
719 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
720 break;
721
722 case USB_ENDPOINT_XFER_ISOC:
723 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
724
725 /* IOC every DWC3_TRB_NUM / 4 so we can refill */
726 if (!(cur_slot % (DWC3_TRB_NUM / 4)))
727 trb->ctrl |= DWC3_TRB_CTRL_IOC;
728 break;
729
730 case USB_ENDPOINT_XFER_BULK:
731 case USB_ENDPOINT_XFER_INT:
732 trb->ctrl = DWC3_TRBCTL_NORMAL;
733 break;
734 default:
735 /*
736 * This is only possible with faulty memory because we
737 * checked it already :)
738 */
739 BUG();
740 }
741
742 if (usb_endpoint_xfer_isoc(dep->desc)) {
743 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
744 trb->ctrl |= DWC3_TRB_CTRL_CSP;
745 } else {
746 if (chain)
747 trb->ctrl |= DWC3_TRB_CTRL_CHN;
748
749 if (last)
750 trb->ctrl |= DWC3_TRB_CTRL_LST;
751 }
752
753 if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
754 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
755
756 trb->ctrl |= DWC3_TRB_CTRL_HWO;
757 }
758
759 /*
760 * dwc3_prepare_trbs - setup TRBs from requests
761 * @dep: endpoint for which requests are being prepared
762 * @starting: true if the endpoint is idle and no requests are queued.
763 *
764 * The function goes through the requests list and sets up TRBs for the
765 * transfers. The function returns once there are no more TRBs available or
766 * it runs out of requests.
767 */
768 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
769 {
770 struct dwc3_request *req, *n;
771 u32 trbs_left;
772 u32 max;
773 unsigned int last_one = 0;
774
775 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
776
777 /* the first request must not be queued */
778 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
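/*
 * Illustrative example: with a ring of DWC3_TRB_NUM slots (e.g. 32,
 * mask 0x1f), busy_slot == 3 and free_slot == 7 gives
 * (3 - 7) & 0x1f == 28 TRBs still available. The values here are made
 * up purely to show the modulo arithmetic.
 */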
779
780 /* Can't wrap around on a non-isoc EP since there's no link TRB */
781 if (!usb_endpoint_xfer_isoc(dep->desc)) {
782 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
783 if (trbs_left > max)
784 trbs_left = max;
785 }
786
787 /*
788 * If the busy and free slots are equal then it is either full or empty. If we are
789 * starting to process requests then we are empty. Otherwise we are
790 * full and don't do anything
791 */
792 if (!trbs_left) {
793 if (!starting)
794 return;
795 trbs_left = DWC3_TRB_NUM;
796 /*
797 * In case we start from scratch, we queue the ISOC requests
798 * starting from slot 1. This is done because we use ring
799 * buffer and have no LST bit to stop us. Instead, we place
800 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
801 * after the first request so we start at slot 1 and have
802 * 7 requests proceed before we hit the first IOC.
803 * Other transfer types don't use the ring buffer and are
804 * processed from the first TRB until the last one. Since we
805 * don't wrap around we have to start at the beginning.
806 */
807 if (usb_endpoint_xfer_isoc(dep->desc)) {
808 dep->busy_slot = 1;
809 dep->free_slot = 1;
810 } else {
811 dep->busy_slot = 0;
812 dep->free_slot = 0;
813 }
814 }
815
816 /* The last TRB is a link TRB, not used for xfer */
817 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
818 return;
819
820 list_for_each_entry_safe(req, n, &dep->request_list, list) {
821 unsigned length;
822 dma_addr_t dma;
823
824 if (req->request.num_mapped_sgs > 0) {
825 struct usb_request *request = &req->request;
826 struct scatterlist *sg = request->sg;
827 struct scatterlist *s;
828 int i;
829
830 for_each_sg(sg, s, request->num_mapped_sgs, i) {
831 unsigned chain = true;
832
833 length = sg_dma_len(s);
834 dma = sg_dma_address(s);
835
836 if (i == (request->num_mapped_sgs - 1) ||
837 sg_is_last(s)) {
838 last_one = true;
839 chain = false;
840 }
841
842 trbs_left--;
843 if (!trbs_left)
844 last_one = true;
845
846 if (last_one)
847 chain = false;
848
849 dwc3_prepare_one_trb(dep, req, dma, length,
850 last_one, chain);
851
852 if (last_one)
853 break;
854 }
855 } else {
856 dma = req->request.dma;
857 length = req->request.length;
858 trbs_left--;
859
860 if (!trbs_left)
861 last_one = 1;
862
863 /* Is this the last request? */
864 if (list_is_last(&req->list, &dep->request_list))
865 last_one = 1;
866
867 dwc3_prepare_one_trb(dep, req, dma, length,
868 last_one, false);
869
870 if (last_one)
871 break;
872 }
873 }
874 }
875
876 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
877 int start_new)
878 {
879 struct dwc3_gadget_ep_cmd_params params;
880 struct dwc3_request *req;
881 struct dwc3 *dwc = dep->dwc;
882 int ret;
883 u32 cmd;
884
885 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
886 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
887 return -EBUSY;
888 }
889 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
890
891 /*
892 * If we are getting here after a short-out-packet we don't enqueue any
893 * new requests as we try to set the IOC bit only on the last request.
894 */
895 if (start_new) {
896 if (list_empty(&dep->req_queued))
897 dwc3_prepare_trbs(dep, start_new);
898
899 /* req points to the first request which will be sent */
900 req = next_request(&dep->req_queued);
901 } else {
902 dwc3_prepare_trbs(dep, start_new);
903
904 /*
905 * req points to the first request where HWO changed from 0 to 1
906 */
907 req = next_request(&dep->req_queued);
908 }
909 if (!req) {
910 dep->flags |= DWC3_EP_PENDING_REQUEST;
911 return 0;
912 }
913
914 memset(&params, 0, sizeof(params));
915 params.param0 = upper_32_bits(req->trb_dma);
916 params.param1 = lower_32_bits(req->trb_dma);
917
918 if (start_new)
919 cmd = DWC3_DEPCMD_STARTTRANSFER;
920 else
921 cmd = DWC3_DEPCMD_UPDATETRANSFER;
922
923 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
924 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
925 if (ret < 0) {
926 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
927
928 /*
929 * FIXME we need to iterate over the list of requests
930 * here and stop, unmap, free and del each of the linked
931 * requests instead of what we do now.
932 */
933 usb_gadget_unmap_request(&dwc->gadget, &req->request,
934 req->direction);
935 list_del(&req->list);
936 return ret;
937 }
938
939 dep->flags |= DWC3_EP_BUSY;
940 dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
941 dep->number);
942
943 WARN_ON_ONCE(!dep->res_trans_idx);
944
945 return 0;
946 }
947
948 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
949 {
950 struct dwc3 *dwc = dep->dwc;
951 int ret;
952
953 req->request.actual = 0;
954 req->request.status = -EINPROGRESS;
955 req->direction = dep->direction;
956 req->epnum = dep->number;
957
958 /*
959 * We only add to our list of requests now and
960 * start consuming the list once we get XferNotReady
961 * IRQ.
962 *
963 * That way, we avoid doing anything that we don't need
964 * to do now and defer it until the point we receive a
965 * particular token from the Host side.
966 *
967 * This will also avoid Host cancelling URBs due to too
968 * many NAKs.
969 */
970 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
971 dep->direction);
972 if (ret)
973 return ret;
974
975 list_add_tail(&req->list, &dep->request_list);
976
977 /*
978 * There is one special case: XferNotReady with
979 * empty list of requests. We need to kick the
980 * transfer here in that situation, otherwise
981 * we will be NAKing forever.
982 *
983 * If we get XferNotReady before gadget driver
984 * has a chance to queue a request, we will ACK
985 * the IRQ but won't be able to receive the data
986 * until the next request is queued. The following
987 * code is handling exactly that.
988 */
989 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
990 int ret;
991 int start_trans;
992
993 start_trans = 1;
994 if (usb_endpoint_xfer_isoc(dep->desc) &&
995 (dep->flags & DWC3_EP_BUSY))
996 start_trans = 0;
997
998 ret = __dwc3_gadget_kick_transfer(dep, 0, start_trans);
999 if (ret && ret != -EBUSY) {
1000 struct dwc3 *dwc = dep->dwc;
1001
1002 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1003 dep->name);
1004 }
1005 }
1006
1007 return 0;
1008 }
1009
1010 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1011 gfp_t gfp_flags)
1012 {
1013 struct dwc3_request *req = to_dwc3_request(request);
1014 struct dwc3_ep *dep = to_dwc3_ep(ep);
1015 struct dwc3 *dwc = dep->dwc;
1016
1017 unsigned long flags;
1018
1019 int ret;
1020
1021 if (!dep->desc) {
1022 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1023 request, ep->name);
1024 return -ESHUTDOWN;
1025 }
1026
1027 dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
1028 request, ep->name, request->length);
1029
1030 spin_lock_irqsave(&dwc->lock, flags);
1031 ret = __dwc3_gadget_ep_queue(dep, req);
1032 spin_unlock_irqrestore(&dwc->lock, flags);
1033
1034 return ret;
1035 }
1036
1037 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1038 struct usb_request *request)
1039 {
1040 struct dwc3_request *req = to_dwc3_request(request);
1041 struct dwc3_request *r = NULL;
1042
1043 struct dwc3_ep *dep = to_dwc3_ep(ep);
1044 struct dwc3 *dwc = dep->dwc;
1045
1046 unsigned long flags;
1047 int ret = 0;
1048
1049 spin_lock_irqsave(&dwc->lock, flags);
1050
1051 list_for_each_entry(r, &dep->request_list, list) {
1052 if (r == req)
1053 break;
1054 }
1055
1056 if (r != req) {
1057 list_for_each_entry(r, &dep->req_queued, list) {
1058 if (r == req)
1059 break;
1060 }
1061 if (r == req) {
1062 /* wait until it is processed */
1063 dwc3_stop_active_transfer(dwc, dep->number);
1064 goto out0;
1065 }
1066 dev_err(dwc->dev, "request %p was not queued to %s\n",
1067 request, ep->name);
1068 ret = -EINVAL;
1069 goto out0;
1070 }
1071
1072 /* giveback the request */
1073 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1074
1075 out0:
1076 spin_unlock_irqrestore(&dwc->lock, flags);
1077
1078 return ret;
1079 }
1080
1081 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
1082 {
1083 struct dwc3_gadget_ep_cmd_params params;
1084 struct dwc3 *dwc = dep->dwc;
1085 int ret;
1086
1087 memset(&params, 0x00, sizeof(params));
1088
1089 if (value) {
1090 if (dep->number == 0 || dep->number == 1) {
1091 /*
1092 * Whenever EP0 is stalled, we will restart
1093 * the state machine, thus moving back to
1094 * Setup Phase
1095 */
1096 dwc->ep0state = EP0_SETUP_PHASE;
1097 }
1098
1099 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1100 DWC3_DEPCMD_SETSTALL, &params);
1101 if (ret)
1102 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1103 value ? "set" : "clear",
1104 dep->name);
1105 else
1106 dep->flags |= DWC3_EP_STALL;
1107 } else {
1108 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1109 DWC3_DEPCMD_CLEARSTALL, &params);
1110 if (ret)
1111 dev_err(dwc->dev, "failed to %s STALL on %s\n",
1112 value ? "set" : "clear",
1113 dep->name);
1114 else
1115 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1116 }
1117
1118 return ret;
1119 }
1120
1121 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1122 {
1123 struct dwc3_ep *dep = to_dwc3_ep(ep);
1124 struct dwc3 *dwc = dep->dwc;
1125
1126 unsigned long flags;
1127
1128 int ret;
1129
1130 spin_lock_irqsave(&dwc->lock, flags);
1131
1132 if (usb_endpoint_xfer_isoc(dep->desc)) {
1133 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1134 ret = -EINVAL;
1135 goto out;
1136 }
1137
1138 ret = __dwc3_gadget_ep_set_halt(dep, value);
1139 out:
1140 spin_unlock_irqrestore(&dwc->lock, flags);
1141
1142 return ret;
1143 }
1144
1145 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1146 {
1147 struct dwc3_ep *dep = to_dwc3_ep(ep);
1148 struct dwc3 *dwc = dep->dwc;
1149 unsigned long flags;
1150
1151 spin_lock_irqsave(&dwc->lock, flags);
1152 dep->flags |= DWC3_EP_WEDGE;
1153 spin_unlock_irqrestore(&dwc->lock, flags);
1154
1155 return dwc3_gadget_ep_set_halt(ep, 1);
1156 }
1157
1158 /* -------------------------------------------------------------------------- */
1159
1160 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1161 .bLength = USB_DT_ENDPOINT_SIZE,
1162 .bDescriptorType = USB_DT_ENDPOINT,
1163 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1164 };
1165
1166 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1167 .enable = dwc3_gadget_ep0_enable,
1168 .disable = dwc3_gadget_ep0_disable,
1169 .alloc_request = dwc3_gadget_ep_alloc_request,
1170 .free_request = dwc3_gadget_ep_free_request,
1171 .queue = dwc3_gadget_ep0_queue,
1172 .dequeue = dwc3_gadget_ep_dequeue,
1173 .set_halt = dwc3_gadget_ep_set_halt,
1174 .set_wedge = dwc3_gadget_ep_set_wedge,
1175 };
1176
1177 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1178 .enable = dwc3_gadget_ep_enable,
1179 .disable = dwc3_gadget_ep_disable,
1180 .alloc_request = dwc3_gadget_ep_alloc_request,
1181 .free_request = dwc3_gadget_ep_free_request,
1182 .queue = dwc3_gadget_ep_queue,
1183 .dequeue = dwc3_gadget_ep_dequeue,
1184 .set_halt = dwc3_gadget_ep_set_halt,
1185 .set_wedge = dwc3_gadget_ep_set_wedge,
1186 };
1187
1188 /* -------------------------------------------------------------------------- */
1189
1190 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1191 {
1192 struct dwc3 *dwc = gadget_to_dwc(g);
1193 u32 reg;
1194
1195 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1196 return DWC3_DSTS_SOFFN(reg);
1197 }
1198
1199 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1200 {
1201 struct dwc3 *dwc = gadget_to_dwc(g);
1202
1203 unsigned long timeout;
1204 unsigned long flags;
1205
1206 u32 reg;
1207
1208 int ret = 0;
1209
1210 u8 link_state;
1211 u8 speed;
1212
1213 spin_lock_irqsave(&dwc->lock, flags);
1214
1215 /*
1216 * According to the Databook, a Remote Wakeup request should
1217 * be issued only when the device is in early suspend state.
1218 *
1219 * We can check that via USB Link State bits in DSTS register.
1220 */
1221 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1222
1223 speed = reg & DWC3_DSTS_CONNECTSPD;
1224 if (speed == DWC3_DSTS_SUPERSPEED) {
1225 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1226 ret = -EINVAL;
1227 goto out;
1228 }
1229
1230 link_state = DWC3_DSTS_USBLNKST(reg);
1231
1232 switch (link_state) {
1233 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1234 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1235 break;
1236 default:
1237 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1238 link_state);
1239 ret = -EINVAL;
1240 goto out;
1241 }
1242
1243 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1244 if (ret < 0) {
1245 dev_err(dwc->dev, "failed to put link in Recovery\n");
1246 goto out;
1247 }
1248
1249 /* write zeroes to Link Change Request */
1250 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1251 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1252
1253 /* poll until Link State changes to ON */
1254 timeout = jiffies + msecs_to_jiffies(100);
1255
1256 while (!time_after(jiffies, timeout)) {
1257 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1258
1259 /* in HS, means ON */
1260 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1261 break;
1262 }
1263
1264 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1265 dev_err(dwc->dev, "failed to send remote wakeup\n");
1266 ret = -EINVAL;
1267 }
1268
1269 out:
1270 spin_unlock_irqrestore(&dwc->lock, flags);
1271
1272 return ret;
1273 }
1274
1275 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1276 int is_selfpowered)
1277 {
1278 struct dwc3 *dwc = gadget_to_dwc(g);
1279 unsigned long flags;
1280
1281 spin_lock_irqsave(&dwc->lock, flags);
1282 dwc->is_selfpowered = !!is_selfpowered;
1283 spin_unlock_irqrestore(&dwc->lock, flags);
1284
1285 return 0;
1286 }
1287
1288 static void dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
1289 {
1290 u32 reg;
1291 u32 timeout = 500;
1292
1293 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1294 if (is_on) {
1295 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1296 reg |= (DWC3_DCTL_RUN_STOP
1297 | DWC3_DCTL_TRGTULST_RX_DET);
1298 } else {
1299 reg &= ~DWC3_DCTL_RUN_STOP;
1300 }
1301
1302 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1303
1304 do {
1305 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1306 if (is_on) {
1307 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1308 break;
1309 } else {
1310 if (reg & DWC3_DSTS_DEVCTRLHLT)
1311 break;
1312 }
1313 timeout--;
1314 if (!timeout)
1315 break;
1316 udelay(1);
1317 } while (1);
1318
1319 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1320 dwc->gadget_driver
1321 ? dwc->gadget_driver->function : "no-function",
1322 is_on ? "connect" : "disconnect");
1323 }
1324
1325 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1326 {
1327 struct dwc3 *dwc = gadget_to_dwc(g);
1328 unsigned long flags;
1329
1330 is_on = !!is_on;
1331
1332 spin_lock_irqsave(&dwc->lock, flags);
1333 dwc3_gadget_run_stop(dwc, is_on);
1334 spin_unlock_irqrestore(&dwc->lock, flags);
1335
1336 return 0;
1337 }
1338
1339 static int dwc3_gadget_start(struct usb_gadget *g,
1340 struct usb_gadget_driver *driver)
1341 {
1342 struct dwc3 *dwc = gadget_to_dwc(g);
1343 struct dwc3_ep *dep;
1344 unsigned long flags;
1345 int ret = 0;
1346 u32 reg;
1347
1348 spin_lock_irqsave(&dwc->lock, flags);
1349
1350 if (dwc->gadget_driver) {
1351 dev_err(dwc->dev, "%s is already bound to %s\n",
1352 dwc->gadget.name,
1353 dwc->gadget_driver->driver.name);
1354 ret = -EBUSY;
1355 goto err0;
1356 }
1357
1358 dwc->gadget_driver = driver;
1359 dwc->gadget.dev.driver = &driver->driver;
1360
1361 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1362 reg &= ~(DWC3_DCFG_SPEED_MASK);
1363 reg |= dwc->maximum_speed;
1364 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1365
1366 dwc->start_config_issued = false;
1367
1368 /* Start with SuperSpeed Default */
1369 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1370
1371 dep = dwc->eps[0];
1372 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1373 if (ret) {
1374 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1375 goto err0;
1376 }
1377
1378 dep = dwc->eps[1];
1379 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
1380 if (ret) {
1381 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1382 goto err1;
1383 }
1384
1385 /* begin to receive SETUP packets */
1386 dwc->ep0state = EP0_SETUP_PHASE;
1387 dwc3_ep0_out_start(dwc);
1388
1389 spin_unlock_irqrestore(&dwc->lock, flags);
1390
1391 return 0;
1392
1393 err1:
1394 __dwc3_gadget_ep_disable(dwc->eps[0]);
1395
1396 err0:
1397 dwc->gadget_driver = NULL;
1398 spin_unlock_irqrestore(&dwc->lock, flags);
1399
1400 return ret;
1401 }
1402
1403 static int dwc3_gadget_stop(struct usb_gadget *g,
1404 struct usb_gadget_driver *driver)
1405 {
1406 struct dwc3 *dwc = gadget_to_dwc(g);
1407 unsigned long flags;
1408
1409 spin_lock_irqsave(&dwc->lock, flags);
1410
1411 __dwc3_gadget_ep_disable(dwc->eps[0]);
1412 __dwc3_gadget_ep_disable(dwc->eps[1]);
1413
1414 dwc->gadget_driver = NULL;
1415 dwc->gadget.dev.driver = NULL;
1416
1417 spin_unlock_irqrestore(&dwc->lock, flags);
1418
1419 return 0;
1420 }
1421 static const struct usb_gadget_ops dwc3_gadget_ops = {
1422 .get_frame = dwc3_gadget_get_frame,
1423 .wakeup = dwc3_gadget_wakeup,
1424 .set_selfpowered = dwc3_gadget_set_selfpowered,
1425 .pullup = dwc3_gadget_pullup,
1426 .udc_start = dwc3_gadget_start,
1427 .udc_stop = dwc3_gadget_stop,
1428 };
1429
1430 /* -------------------------------------------------------------------------- */
1431
1432 static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1433 {
1434 struct dwc3_ep *dep;
1435 u8 epnum;
1436
1437 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1438
1439 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1440 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1441 if (!dep) {
1442 dev_err(dwc->dev, "can't allocate endpoint %d\n",
1443 epnum);
1444 return -ENOMEM;
1445 }
1446
1447 dep->dwc = dwc;
1448 dep->number = epnum;
1449 dwc->eps[epnum] = dep;
1450
1451 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1452 (epnum & 1) ? "in" : "out");
1453 dep->endpoint.name = dep->name;
1454 dep->direction = (epnum & 1);
1455
1456 if (epnum == 0 || epnum == 1) {
1457 dep->endpoint.maxpacket = 512;
1458 dep->endpoint.maxburst = 1;
1459 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1460 if (!epnum)
1461 dwc->gadget.ep0 = &dep->endpoint;
1462 } else {
1463 int ret;
1464
1465 dep->endpoint.maxpacket = 1024;
1466 dep->endpoint.max_streams = 15;
1467 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1468 list_add_tail(&dep->endpoint.ep_list,
1469 &dwc->gadget.ep_list);
1470
1471 ret = dwc3_alloc_trb_pool(dep);
1472 if (ret)
1473 return ret;
1474 }
1475
1476 INIT_LIST_HEAD(&dep->request_list);
1477 INIT_LIST_HEAD(&dep->req_queued);
1478 }
1479
1480 return 0;
1481 }
1482
1483 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1484 {
1485 struct dwc3_ep *dep;
1486 u8 epnum;
1487
1488 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1489 dep = dwc->eps[epnum];
1490 /*
1491 * Physical endpoints 0 and 1 are special; they form the
1492 * bi-directional USB endpoint 0.
1493 *
1494 * For those two physical endpoints, we don't allocate a TRB
1495 * pool nor do we add them the endpoints list. Due to that, we
1496 * shouldn't do these two operations otherwise we would end up
1497 * with all sorts of bugs when removing dwc3.ko.
1498 */
1499 if (epnum != 0 && epnum != 1) {
1500 dwc3_free_trb_pool(dep);
1501 list_del(&dep->endpoint.ep_list);
1502 }
1503
1504 kfree(dep);
1505 }
1506 }
1507
1508 static void dwc3_gadget_release(struct device *dev)
1509 {
1510 dev_dbg(dev, "%s\n", __func__);
1511 }
1512
1513 /* -------------------------------------------------------------------------- */
1514 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1515 const struct dwc3_event_depevt *event, int status)
1516 {
1517 struct dwc3_request *req;
1518 struct dwc3_trb *trb;
1519 unsigned int count;
1520 unsigned int s_pkt = 0;
1521
1522 do {
1523 req = next_request(&dep->req_queued);
1524 if (!req) {
1525 WARN_ON_ONCE(1);
1526 return 1;
1527 }
1528
1529 trb = req->trb;
1530
1531 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1532 /*
1533 * We continue despite the error. There is not much we
1534 * can do. If we don't clean it up we loop forever. If
1535 * we skip the TRB then it gets overwritten after a
1536 * while since we use them in a ring buffer. A BUG()
1537 * would help. Let's hope that if this occurs, someone
1538 * fixes the root cause instead of looking away :)
1539 */
1540 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1541 dep->name, req->trb);
1542 count = trb->size & DWC3_TRB_SIZE_MASK;
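/*
 * After completion the TRB's BUFSIZ field holds the number of bytes
 * that were _not_ transferred, so "length - count" below is the amount
 * of data the hardware actually moved.
 */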
1543
1544 if (dep->direction) {
1545 if (count) {
1546 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1547 dep->name);
1548 status = -ECONNRESET;
1549 }
1550 } else {
1551 if (count && (event->status & DEPEVT_STATUS_SHORT))
1552 s_pkt = 1;
1553 }
1554
1555 /*
1556 * We assume here we will always receive the entire data block
1557 * which we should receive. Meaning, if we program RX to
1558 * receive 4K but we receive only 2K, we assume that's all we
1559 * should receive and we simply bounce the request back to the
1560 * gadget driver for further processing.
1561 */
1562 req->request.actual += req->request.length - count;
1563 dwc3_gadget_giveback(dep, req, status);
1564 if (s_pkt)
1565 break;
1566 if ((event->status & DEPEVT_STATUS_LST) &&
1567 (trb->ctrl & DWC3_TRB_CTRL_LST))
1568 break;
1569 if ((event->status & DEPEVT_STATUS_IOC) &&
1570 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1571 break;
1572 } while (1);
1573
1574 if ((event->status & DEPEVT_STATUS_IOC) &&
1575 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1576 return 0;
1577 return 1;
1578 }
1579
1580 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1581 struct dwc3_ep *dep, const struct dwc3_event_depevt *event,
1582 int start_new)
1583 {
1584 unsigned status = 0;
1585 int clean_busy;
1586
1587 if (event->status & DEPEVT_STATUS_BUSERR)
1588 status = -ECONNRESET;
1589
1590 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1591 if (clean_busy)
1592 dep->flags &= ~DWC3_EP_BUSY;
1593
1594 /*
1595 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1596 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1597 */
1598 if (dwc->revision < DWC3_REVISION_183A) {
1599 u32 reg;
1600 int i;
1601
1602 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1603 struct dwc3_ep *dep = dwc->eps[i];
1604
1605 if (!(dep->flags & DWC3_EP_ENABLED))
1606 continue;
1607
1608 if (!list_empty(&dep->req_queued))
1609 return;
1610 }
1611
1612 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1613 reg |= dwc->u1u2;
1614 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1615
1616 dwc->u1u2 = 0;
1617 }
1618 }
1619
1620 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1621 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1622 {
1623 u32 uf, mask;
1624
1625 if (list_empty(&dep->request_list)) {
1626 dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests.\n",
1627 dep->name);
1628 return;
1629 }
1630
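/*
 * Align the current (micro)frame number down to a multiple of the
 * endpoint interval and schedule 4 intervals into the future. E.g.
 * with a (hypothetical) interval of 8 and a current uframe of 0x123,
 * the transfer would be started at uframe (0x123 & ~7) + 4 * 8 = 0x140.
 */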
1631 mask = ~(dep->interval - 1);
1632 uf = event->parameters & mask;
1633 /* 4 micro frames in the future */
1634 uf += dep->interval * 4;
1635
1636 __dwc3_gadget_kick_transfer(dep, uf, 1);
1637 }
1638
1639 static void dwc3_process_ep_cmd_complete(struct dwc3_ep *dep,
1640 const struct dwc3_event_depevt *event)
1641 {
1642 struct dwc3 *dwc = dep->dwc;
1643 struct dwc3_event_depevt mod_ev = *event;
1644
1645 /*
1646 * We were asked to remove one request. It is possible that this
1647 * request and a few others were started together and have the same
1648 * transfer index. Since we stopped the whole endpoint we don't
1649 * know how many requests have already completed (but not yet been
1650 * reported) and how many more could complete later. We purge them
1651 * all until the end of the list.
1652 */
1653 mod_ev.status = DEPEVT_STATUS_LST;
1654 dwc3_cleanup_done_reqs(dwc, dep, &mod_ev, -ESHUTDOWN);
1655 dep->flags &= ~DWC3_EP_BUSY;
1656 /* pending requests are ignored and are queued on XferNotReady */
1657 }
1658
1659 static void dwc3_ep_cmd_compl(struct dwc3_ep *dep,
1660 const struct dwc3_event_depevt *event)
1661 {
1662 u32 param = event->parameters;
1663 u32 cmd_type = (param >> 8) & ((1 << 5) - 1);
1664
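/*
 * Bits [12:8] of the event parameters carry the type of the endpoint
 * command that completed; for a Start Transfer completion the transfer
 * resource index is reported in bits [6:0], as used below.
 */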
1665 switch (cmd_type) {
1666 case DWC3_DEPCMD_ENDTRANSFER:
1667 dwc3_process_ep_cmd_complete(dep, event);
1668 break;
1669 case DWC3_DEPCMD_STARTTRANSFER:
1670 dep->res_trans_idx = param & 0x7f;
1671 break;
1672 default:
1673 printk(KERN_ERR "%s() unknown/unexpected type: %d\n",
1674 __func__, cmd_type);
1675 break;
1676 }
1677 }
1678
1679 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1680 const struct dwc3_event_depevt *event)
1681 {
1682 struct dwc3_ep *dep;
1683 u8 epnum = event->endpoint_number;
1684
1685 dep = dwc->eps[epnum];
1686
1687 dev_vdbg(dwc->dev, "%s: %s\n", dep->name,
1688 dwc3_ep_event_string(event->endpoint_event));
1689
1690 if (epnum == 0 || epnum == 1) {
1691 dwc3_ep0_interrupt(dwc, event);
1692 return;
1693 }
1694
1695 switch (event->endpoint_event) {
1696 case DWC3_DEPEVT_XFERCOMPLETE:
1697 dep->res_trans_idx = 0;
1698
1699 if (usb_endpoint_xfer_isoc(dep->desc)) {
1700 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1701 dep->name);
1702 return;
1703 }
1704
1705 dwc3_endpoint_transfer_complete(dwc, dep, event, 1);
1706 break;
1707 case DWC3_DEPEVT_XFERINPROGRESS:
1708 if (!usb_endpoint_xfer_isoc(dep->desc)) {
1709 dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n",
1710 dep->name);
1711 return;
1712 }
1713
1714 dwc3_endpoint_transfer_complete(dwc, dep, event, 0);
1715 break;
1716 case DWC3_DEPEVT_XFERNOTREADY:
1717 if (usb_endpoint_xfer_isoc(dep->desc)) {
1718 dwc3_gadget_start_isoc(dwc, dep, event);
1719 } else {
1720 int ret;
1721
1722 dev_vdbg(dwc->dev, "%s: reason %s\n",
1723 dep->name, event->status &
1724 DEPEVT_STATUS_TRANSFER_ACTIVE
1725 ? "Transfer Active"
1726 : "Transfer Not Active");
1727
1728 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1729 if (!ret || ret == -EBUSY)
1730 return;
1731
1732 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1733 dep->name);
1734 }
1735
1736 break;
1737 case DWC3_DEPEVT_STREAMEVT:
1738 if (!usb_endpoint_xfer_bulk(dep->desc)) {
1739 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1740 dep->name);
1741 return;
1742 }
1743
1744 switch (event->status) {
1745 case DEPEVT_STREAMEVT_FOUND:
1746 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1747 event->parameters);
1748
1749 break;
1750 case DEPEVT_STREAMEVT_NOTFOUND:
1751 /* FALLTHROUGH */
1752 default:
1753 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1754 }
1755 break;
1756 case DWC3_DEPEVT_RXTXFIFOEVT:
1757 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1758 break;
1759 case DWC3_DEPEVT_EPCMDCMPLT:
1760 dwc3_ep_cmd_compl(dep, event);
1761 break;
1762 }
1763 }
1764
1765 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1766 {
1767 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1768 spin_unlock(&dwc->lock);
1769 dwc->gadget_driver->disconnect(&dwc->gadget);
1770 spin_lock(&dwc->lock);
1771 }
1772 }
1773
1774 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum)
1775 {
1776 struct dwc3_ep *dep;
1777 struct dwc3_gadget_ep_cmd_params params;
1778 u32 cmd;
1779 int ret;
1780
1781 dep = dwc->eps[epnum];
1782
1783 WARN_ON(!dep->res_trans_idx);
1784 if (dep->res_trans_idx) {
1785 cmd = DWC3_DEPCMD_ENDTRANSFER;
1786 cmd |= DWC3_DEPCMD_HIPRI_FORCERM | DWC3_DEPCMD_CMDIOC;
1787 cmd |= DWC3_DEPCMD_PARAM(dep->res_trans_idx);
1788 memset(&params, 0, sizeof(params));
1789 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1790 WARN_ON_ONCE(ret);
1791 dep->res_trans_idx = 0;
1792 dep->flags &= ~DWC3_EP_BUSY;
1793 }
1794 }
1795
1796 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
1797 {
1798 u32 epnum;
1799
1800 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1801 struct dwc3_ep *dep;
1802
1803 dep = dwc->eps[epnum];
1804 if (!(dep->flags & DWC3_EP_ENABLED))
1805 continue;
1806
1807 dwc3_remove_requests(dwc, dep);
1808 }
1809 }
1810
1811 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
1812 {
1813 u32 epnum;
1814
1815 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1816 struct dwc3_ep *dep;
1817 struct dwc3_gadget_ep_cmd_params params;
1818 int ret;
1819
1820 dep = dwc->eps[epnum];
1821
1822 if (!(dep->flags & DWC3_EP_STALL))
1823 continue;
1824
1825 dep->flags &= ~DWC3_EP_STALL;
1826
1827 memset(&params, 0, sizeof(params));
1828 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1829 DWC3_DEPCMD_CLEARSTALL, &params);
1830 WARN_ON_ONCE(ret);
1831 }
1832 }
1833
1834 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
1835 {
1836 dev_vdbg(dwc->dev, "%s\n", __func__);
1837 #if 0
1838 XXX
1839 U1/U2 is powersave optimization. Skip it for now. Anyway we need to
1840 enable it before we can disable it.
1841
1842 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1843 reg &= ~DWC3_DCTL_INITU1ENA;
1844 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1845
1846 reg &= ~DWC3_DCTL_INITU2ENA;
1847 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1848 #endif
1849
1850 dwc3_stop_active_transfers(dwc);
1851 dwc3_disconnect_gadget(dwc);
1852 dwc->start_config_issued = false;
1853
1854 dwc->gadget.speed = USB_SPEED_UNKNOWN;
1855 dwc->setup_packet_pending = false;
1856 }
1857
1858 static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
1859 {
1860 u32 reg;
1861
1862 reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1863
1864 if (on)
1865 reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
1866 else
1867 reg |= DWC3_GUSB3PIPECTL_SUSPHY;
1868
1869 dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
1870 }
1871
1872 static void dwc3_gadget_usb2_phy_power(struct dwc3 *dwc, int on)
1873 {
1874 u32 reg;
1875
1876 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
1877
1878 if (on)
1879 reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
1880 else
1881 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
1882
1883 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
1884 }
1885
1886 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
1887 {
1888 u32 reg;
1889
1890 dev_vdbg(dwc->dev, "%s\n", __func__);
1891
1892 /*
1893 * WORKAROUND: DWC3 revisions <1.88a have an issue which
1894 * would cause a missing Disconnect Event if there's a
1895 * pending Setup Packet in the FIFO.
1896 *
1897 * There's no suggested workaround on the official Bug
1898 * report, which states that "unless the driver/application
1899 * is doing any special handling of a disconnect event,
1900 * there is no functional issue".
1901 *
1902 * Unfortunately, it turns out that we _do_ some special
1903 * handling of a disconnect event, namely complete all
1904 * pending transfers, notify gadget driver of the
1905 * disconnection, and so on.
1906 *
1907 * Our suggested workaround is to follow the Disconnect
1908 * Event steps here, instead, based on a setup_packet_pending
1909 * flag. Such flag gets set whenever we have a XferNotReady
1910 * event on EP0 and gets cleared on XferComplete for the
1911 * same endpoint.
1912 *
1913 * Refers to:
1914 *
1915 * STAR#9000466709: RTL: Device : Disconnect event not
1916 * generated if setup packet pending in FIFO
1917 */
1918 if (dwc->revision < DWC3_REVISION_188A) {
1919 if (dwc->setup_packet_pending)
1920 dwc3_gadget_disconnect_interrupt(dwc);
1921 }
1922
1923 /* after reset -> Default State */
1924 dwc->dev_state = DWC3_DEFAULT_STATE;
1925
1926 /* Enable PHYs */
1927 dwc3_gadget_usb2_phy_power(dwc, true);
1928 dwc3_gadget_usb3_phy_power(dwc, true);
1929
1930 if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
1931 dwc3_disconnect_gadget(dwc);
1932
1933 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1934 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
1935 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1936 dwc->test_mode = false;
1937
1938 dwc3_stop_active_transfers(dwc);
1939 dwc3_clear_stall_all_ep(dwc);
1940 dwc->start_config_issued = false;
1941
1942 /* Reset device address to zero */
1943 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1944 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
1945 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1946 }
1947
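/**
 * dwc3_update_ram_clk_sel - reprogram RAMClkSel on Connect Done
 * @dwc: pointer to our context structure
 * @speed: the speed reported by DSTS
 *
 * Only relevant at SuperSpeed: RAMClkSel is reset to 0 by USB reset, so
 * it has to be written back into GCTL on every Connect Done whenever a
 * clock other than the bus clock is selected.
 */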
1948 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
1949 {
1950 u32 reg;
1951 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
1952
1953 /*
1954 * We change the clock only at SuperSpeed; it is not entirely clear
1955 * why this is needed. It may become part of the power saving plan.
1956 */
1957
1958 if (speed != DWC3_DSTS_SUPERSPEED)
1959 return;
1960
1961 /*
1962 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
1963 * each time on Connect Done.
1964 */
1965 if (!usb30_clock)
1966 return;
1967
1968 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
1969 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
1970 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
1971 }
1972
1973 static void dwc3_gadget_disable_phy(struct dwc3 *dwc, u8 speed)
1974 {
1975 switch (speed) {
1976 case USB_SPEED_SUPER:
1977 dwc3_gadget_usb2_phy_power(dwc, false);
1978 break;
1979 case USB_SPEED_HIGH:
1980 case USB_SPEED_FULL:
1981 case USB_SPEED_LOW:
1982 dwc3_gadget_usb3_phy_power(dwc, false);
1983 break;
1984 }
1985 }
1986
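/**
 * dwc3_gadget_conndone_interrupt - handle the Connect Done device event
 * @dwc: pointer to our context structure
 *
 * Reads the connection speed from DSTS, programs ep0's maxpacket size
 * and the gadget speed accordingly, applies the pre-1.90a missing USB3
 * Reset event workaround, powers down the unused PHY and re-enables
 * both directions of the default control endpoint.
 */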
1987 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
1988 {
1989 struct dwc3_gadget_ep_cmd_params params;
1990 struct dwc3_ep *dep;
1991 int ret;
1992 u32 reg;
1993 u8 speed;
1994
1995 dev_vdbg(dwc->dev, "%s\n", __func__);
1996
1997 memset(&params, 0x00, sizeof(params));
1998
1999 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2000 speed = reg & DWC3_DSTS_CONNECTSPD;
2001 dwc->speed = speed;
2002
2003 dwc3_update_ram_clk_sel(dwc, speed);
2004
2005 switch (speed) {
2006 case DWC3_DCFG_SUPERSPEED:
2007 /*
2008 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2009 * would cause a missing USB3 Reset event.
2010 *
2011 * In such situations, we should force a USB3 Reset
2012 * event by calling our dwc3_gadget_reset_interrupt()
2013 * routine.
2014 *
2015 * Refers to:
2016 *
2017 * STAR#9000483510: RTL: SS : USB3 reset event may
2018 * not be generated always when the link enters poll
2019 */
2020 if (dwc->revision < DWC3_REVISION_190A)
2021 dwc3_gadget_reset_interrupt(dwc);
2022
2023 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2024 dwc->gadget.ep0->maxpacket = 512;
2025 dwc->gadget.speed = USB_SPEED_SUPER;
2026 break;
2027 case DWC3_DCFG_HIGHSPEED:
2028 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2029 dwc->gadget.ep0->maxpacket = 64;
2030 dwc->gadget.speed = USB_SPEED_HIGH;
2031 break;
2032 case DWC3_DCFG_FULLSPEED2:
2033 case DWC3_DCFG_FULLSPEED1:
2034 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2035 dwc->gadget.ep0->maxpacket = 64;
2036 dwc->gadget.speed = USB_SPEED_FULL;
2037 break;
2038 case DWC3_DCFG_LOWSPEED:
2039 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2040 dwc->gadget.ep0->maxpacket = 8;
2041 dwc->gadget.speed = USB_SPEED_LOW;
2042 break;
2043 }
2044
2045 /* Disable the unneeded PHY */
2046 dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
2047
2048 dep = dwc->eps[0];
2049 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2050 if (ret) {
2051 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2052 return;
2053 }
2054
2055 dep = dwc->eps[1];
2056 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
2057 if (ret) {
2058 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2059 return;
2060 }
2061
2062 /*
2063 * Configure PHY via GUSB3PIPECTLn if required.
2064 *
2065 * Update GTXFIFOSIZn
2066 *
2067 * In both cases reset values should be sufficient.
2068 */
2069 }
2070
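/**
 * dwc3_gadget_wakeup_interrupt - handle the Wakeup device event
 * @dwc: pointer to our context structure
 *
 * Simply forwards the event to the gadget driver's ->resume() callback;
 * taking the core out of low power mode is still a TODO.
 */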
2071 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2072 {
2073 dev_vdbg(dwc->dev, "%s\n", __func__);
2074
2075 /*
2076 * TODO take core out of low power mode when that's
2077 * implemented.
2078 */
2079
2080 dwc->gadget_driver->resume(&dwc->gadget);
2081 }
2082
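/**
 * dwc3_gadget_linksts_change_interrupt - handle a Link State Change event
 * @dwc: pointer to our context structure
 * @evtinfo: event information, holding the new link state
 *
 * Applies the first half of the pre-1.83a U1/U2 throughput workaround
 * (temporarily clearing the U1/U2 enable bits in DCTL when resuming to
 * U0) and records the new link state in dwc->link_state.
 */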
2083 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2084 unsigned int evtinfo)
2085 {
2086 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2087
2088 /*
2089 * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
2090 * on the link partner, the USB session might go through multiple
2091 * entries/exits of low power states before a transfer takes place.
2092 *
2093 * Due to this problem, we might experience lower throughput. The
2094 * suggested workaround is to disable DCTL[12:9] bits if we're
2095 * transitioning from U1/U2 to U0 and enable those bits again
2096 * after a transfer completes and there are no pending transfers
2097 * on any of the enabled endpoints.
2098 *
2099 * This is the first half of that workaround.
2100 *
2101 * Refers to:
2102 *
2103 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2104 * core send LGO_Ux entering U0
2105 */
2106 if (dwc->revision < DWC3_REVISION_183A) {
2107 if (next == DWC3_LINK_STATE_U0) {
2108 u32 u1u2;
2109 u32 reg;
2110
2111 switch (dwc->link_state) {
2112 case DWC3_LINK_STATE_U1:
2113 case DWC3_LINK_STATE_U2:
2114 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2115 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2116 | DWC3_DCTL_ACCEPTU2ENA
2117 | DWC3_DCTL_INITU1ENA
2118 | DWC3_DCTL_ACCEPTU1ENA);
2119
2120 if (!dwc->u1u2)
2121 dwc->u1u2 = reg & u1u2;
2122
2123 reg &= ~u1u2;
2124
2125 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2126 break;
2127 default:
2128 /* do nothing */
2129 break;
2130 }
2131 }
2132 }
2133
2134 dwc->link_state = next;
2135
2136 dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
2137 }
2138
2139 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2140 const struct dwc3_event_devt *event)
2141 {
2142 switch (event->type) {
2143 case DWC3_DEVICE_EVENT_DISCONNECT:
2144 dwc3_gadget_disconnect_interrupt(dwc);
2145 break;
2146 case DWC3_DEVICE_EVENT_RESET:
2147 dwc3_gadget_reset_interrupt(dwc);
2148 break;
2149 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2150 dwc3_gadget_conndone_interrupt(dwc);
2151 break;
2152 case DWC3_DEVICE_EVENT_WAKEUP:
2153 dwc3_gadget_wakeup_interrupt(dwc);
2154 break;
2155 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2156 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2157 break;
2158 case DWC3_DEVICE_EVENT_EOPF:
2159 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2160 break;
2161 case DWC3_DEVICE_EVENT_SOF:
2162 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2163 break;
2164 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2165 dev_vdbg(dwc->dev, "Erratic Error\n");
2166 break;
2167 case DWC3_DEVICE_EVENT_CMD_CMPL:
2168 dev_vdbg(dwc->dev, "Command Complete\n");
2169 break;
2170 case DWC3_DEVICE_EVENT_OVERFLOW:
2171 dev_vdbg(dwc->dev, "Overflow\n");
2172 break;
2173 default:
2174 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2175 }
2176 }
2177
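/**
 * dwc3_process_event_entry - demultiplex a single event buffer entry
 * @dwc: pointer to our context structure
 * @event: the event entry to process
 *
 * Endpoint-specific events are passed straight to
 * dwc3_endpoint_interrupt(); device events go through
 * dwc3_gadget_interrupt(); anything else is logged as unknown.
 */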
2178 static void dwc3_process_event_entry(struct dwc3 *dwc,
2179 const union dwc3_event *event)
2180 {
2181 /* Endpoint IRQ, handle it and return early */
2182 if (event->type.is_devspec == 0) {
2183 /* depevt */
2184 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2185 }
2186
2187 switch (event->type.type) {
2188 case DWC3_EVENT_TYPE_DEV:
2189 dwc3_gadget_interrupt(dwc, &event->devt);
2190 break;
2191 /* REVISIT what to do with Carkit and I2C events ? */
2192 default:
2193 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2194 }
2195 }
2196
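/**
 * dwc3_process_event_buf - process pending events of one event buffer
 * @dwc: pointer to our context structure
 * @buf: index of the event buffer to drain
 *
 * Reads GEVNTCOUNT for the buffer and, for every pending 4-byte entry,
 * processes it and then acknowledges it by writing 4 back to GEVNTCOUNT.
 * Returns IRQ_HANDLED if anything was processed, IRQ_NONE otherwise.
 */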
2197 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2198 {
2199 struct dwc3_event_buffer *evt;
2200 int left;
2201 u32 count;
2202
2203 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2204 count &= DWC3_GEVNTCOUNT_MASK;
2205 if (!count)
2206 return IRQ_NONE;
2207
2208 evt = dwc->ev_buffs[buf];
2209 left = count;
2210
2211 while (left > 0) {
2212 union dwc3_event event;
2213
2214 event.raw = *(u32 *) (evt->buf + evt->lpos);
2215
2216 dwc3_process_event_entry(dwc, &event);
2217 /*
2218 * XXX we wrap around correctly to the next entry as almost all
2219 * entries are 4 bytes in size. There is one entry which has 12
2220 * bytes: a regular entry followed by 8 bytes of data. It is not
2221 * yet clear how such an entry is laid out when it crosses the
2222 * buffer boundary, so worry about that once we try to handle it.
2223 */
2224 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2225 left -= 4;
2226
2227 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2228 }
2229
2230 return IRQ_HANDLED;
2231 }
2232
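/**
 * dwc3_interrupt - top-level (shared) interrupt handler
 * @irq: the IRQ number being serviced
 * @_dwc: pointer to our context structure
 *
 * Takes dwc->lock and drains every allocated event buffer in turn,
 * reporting IRQ_HANDLED if at least one of them had pending events.
 */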
2233 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2234 {
2235 struct dwc3 *dwc = _dwc;
2236 int i;
2237 irqreturn_t ret = IRQ_NONE;
2238
2239 spin_lock(&dwc->lock);
2240
2241 for (i = 0; i < dwc->num_event_buffers; i++) {
2242 irqreturn_t status;
2243
2244 status = dwc3_process_event_buf(dwc, i);
2245 if (status == IRQ_HANDLED)
2246 ret = status;
2247 }
2248
2249 spin_unlock(&dwc->lock);
2250
2251 return ret;
2252 }
2253
2254 /**
2255 * dwc3_gadget_init - Initializes gadget related registers
2256 * @dwc: pointer to our controller context structure
2257 *
2258 * Returns 0 on success otherwise negative errno.
2259 */
2260 int __devinit dwc3_gadget_init(struct dwc3 *dwc)
2261 {
2262 u32 reg;
2263 int ret;
2264 int irq;
2265
2266 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2267 &dwc->ctrl_req_addr, GFP_KERNEL);
2268 if (!dwc->ctrl_req) {
2269 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2270 ret = -ENOMEM;
2271 goto err0;
2272 }
2273
2274 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2275 &dwc->ep0_trb_addr, GFP_KERNEL);
2276 if (!dwc->ep0_trb) {
2277 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2278 ret = -ENOMEM;
2279 goto err1;
2280 }
2281
2282 dwc->setup_buf = kzalloc(sizeof(*dwc->setup_buf) * 2,
2283 GFP_KERNEL);
2284 if (!dwc->setup_buf) {
2285 dev_err(dwc->dev, "failed to allocate setup buffer\n");
2286 ret = -ENOMEM;
2287 goto err2;
2288 }
2289
2290 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2291 512, &dwc->ep0_bounce_addr, GFP_KERNEL);
2292 if (!dwc->ep0_bounce) {
2293 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2294 ret = -ENOMEM;
2295 goto err3;
2296 }
2297
2298 dev_set_name(&dwc->gadget.dev, "gadget");
2299
2300 dwc->gadget.ops = &dwc3_gadget_ops;
2301 dwc->gadget.max_speed = USB_SPEED_SUPER;
2302 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2303 dwc->gadget.dev.parent = dwc->dev;
2304 dwc->gadget.sg_supported = true;
2305
2306 dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
2307
2308 dwc->gadget.dev.dma_parms = dwc->dev->dma_parms;
2309 dwc->gadget.dev.dma_mask = dwc->dev->dma_mask;
2310 dwc->gadget.dev.release = dwc3_gadget_release;
2311 dwc->gadget.name = "dwc3-gadget";
2312
2313 /*
2314 * REVISIT: Here we should clear all pending IRQs to be
2315 * sure we're starting from a well known location.
2316 */
2317
2318 ret = dwc3_gadget_init_endpoints(dwc);
2319 if (ret)
2320 goto err4;
2321
2322 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2323
2324 ret = request_irq(irq, dwc3_interrupt, IRQF_SHARED,
2325 "dwc3", dwc);
2326 if (ret) {
2327 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2328 irq, ret);
2329 goto err5;
2330 }
2331
2332 /* Enable all but Start and End of Frame IRQs */
2333 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
2334 DWC3_DEVTEN_EVNTOVERFLOWEN |
2335 DWC3_DEVTEN_CMDCMPLTEN |
2336 DWC3_DEVTEN_ERRTICERREN |
2337 DWC3_DEVTEN_WKUPEVTEN |
2338 DWC3_DEVTEN_ULSTCNGEN |
2339 DWC3_DEVTEN_CONNECTDONEEN |
2340 DWC3_DEVTEN_USBRSTEN |
2341 DWC3_DEVTEN_DISCONNEVTEN);
2342 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2343
2344 ret = device_register(&dwc->gadget.dev);
2345 if (ret) {
2346 dev_err(dwc->dev, "failed to register gadget device\n");
2347 put_device(&dwc->gadget.dev);
2348 goto err6;
2349 }
2350
2351 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2352 if (ret) {
2353 dev_err(dwc->dev, "failed to register udc\n");
2354 goto err7;
2355 }
2356
2357 return 0;
2358
2359 err7:
2360 device_unregister(&dwc->gadget.dev);
2361
2362 err6:
2363 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2364 free_irq(irq, dwc);
2365
2366 err5:
2367 dwc3_gadget_free_endpoints(dwc);
2368
2369 err4:
2370 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2371 dwc->ep0_bounce_addr);
2372
2373 err3:
2374 kfree(dwc->setup_buf);
2375
2376 err2:
2377 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2378 dwc->ep0_trb, dwc->ep0_trb_addr);
2379
2380 err1:
2381 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2382 dwc->ctrl_req, dwc->ctrl_req_addr);
2383
2384 err0:
2385 return ret;
2386 }
2387
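/**
 * dwc3_gadget_exit - undo everything done by dwc3_gadget_init()
 * @dwc: pointer to our context structure
 *
 * Unregisters the UDC and the gadget device, disables device events and
 * frees the IRQ, frees the endpoints and releases the ep0 bounce buffer,
 * setup buffer, ep0 TRB and control request DMA allocations.
 */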
2388 void dwc3_gadget_exit(struct dwc3 *dwc)
2389 {
2390 int irq;
2391
2392 usb_del_gadget_udc(&dwc->gadget);
2393 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2394
2395 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2396 free_irq(irq, dwc);
2397
2398 dwc3_gadget_free_endpoints(dwc);
2399
2400 dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
2401 dwc->ep0_bounce_addr);
2402
2403 kfree(dwc->setup_buf);
2404
2405 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2406 dwc->ep0_trb, dwc->ep0_trb_addr);
2407
2408 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2409 dwc->ctrl_req, dwc->ctrl_req_addr);
2410
2411 device_unregister(&dwc->gadget.dev);
2412 }
2413