/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * hcd.h - DesignWare HS OTG Controller host-mode declarations
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 */

#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__

/*
 * This file contains the structures, constants, and interfaces for the
 * Host Controller Driver (HCD).
 *
 * The Host Controller Driver (HCD) is responsible for translating requests
 * from the USB Driver into the appropriate actions on the DWC_otg controller.
 * It isolates the USBD from the specifics of the controller by providing an
 * API to the USBD.
 */

struct dwc2_qh;

/**
 * struct dwc2_host_chan - Software host channel descriptor
 *
 * @hc_num:             Host channel number, used for register address lookup
 * @dev_addr:           Address of the device
 * @ep_num:             Endpoint of the device
 * @ep_is_in:           Endpoint direction
 * @speed:              Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL: 0
 *                       - USB_ENDPOINT_XFER_ISOC: 1
 *                       - USB_ENDPOINT_XFER_BULK: 2
 *                       - USB_ENDPOINT_XFER_INTR: 3
 * @max_packet:         Max packet size in bytes
 * @data_pid_start:     PID for initial transaction.
 *                       0: DATA0
 *                       1: DATA2
 *                       2: DATA1
 *                       3: MDATA (non-Control EP),
 *                          SETUP (Control EP)
 * @multi_count:        Number of additional periodic transactions per
 *                      (micro)frame
 * @xfer_buf:           Pointer to current transfer buffer position
 * @xfer_dma:           DMA address of xfer_buf
 * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
 *                      DWORD aligned
 * @xfer_len:           Total number of bytes to transfer
 * @xfer_count:         Number of bytes transferred so far
 * @start_pkt_count:    Packet count at start of transfer
 * @xfer_started:       True if the transfer has been started
 * @do_ping:            True if a PING request should be issued on this channel
 * @error_state:        True if the error count for this transaction is non-zero
 * @halt_on_queue:      True if this channel should be halted the next time a
 *                      request is queued for the channel. This is necessary in
 *                      slave mode if no request queue space is available when
 *                      an attempt is made to halt the channel.
 * @halt_pending:       True if the host channel has been halted, but the core
 *                      is not finished flushing queued requests
 * @do_split:           Enable split for the channel
 * @complete_split:     Enable complete split
 * @hub_addr:           Address of high speed hub for the split
 * @hub_port:           Port of the low/full speed device for the split
 * @xact_pos:           Split transaction position. One of the following values:
 *                       - DWC2_HCSPLT_XACTPOS_MID
 *                       - DWC2_HCSPLT_XACTPOS_BEGIN
 *                       - DWC2_HCSPLT_XACTPOS_END
 *                       - DWC2_HCSPLT_XACTPOS_ALL
 * @requests:           Number of requests issued for this channel since it was
 *                      assigned to the current transfer (not counting PINGs)
 * @schinfo:            Scheduling micro-frame bitmap
 * @ntd:                Number of transfer descriptors for the transfer
 * @halt_status:        Reason for halting the host channel
 * @hcint:              Contents of the HCINT register when the interrupt came
 * @qh:                 QH for the transfer being processed by this channel
 * @hc_list_entry:      For linking to list of host channels
 * @desc_list_addr:     Current QH's descriptor list DMA address
 * @desc_list_sz:       Current QH's descriptor list size
 * @split_order_list_entry: List entry for keeping track of the order of splits
 *
 * This structure represents the state of a single host channel when acting in
 * host mode. It contains the data items needed to transfer packets to an
 * endpoint via a host channel.
 */
struct dwc2_host_chan {
        u8 hc_num;

        unsigned dev_addr:7;
        unsigned ep_num:4;
        unsigned ep_is_in:1;
        unsigned speed:4;
        unsigned ep_type:2;
        unsigned max_packet:11;
        unsigned data_pid_start:2;
#define DWC2_HC_PID_DATA0       TSIZ_SC_MC_PID_DATA0
#define DWC2_HC_PID_DATA2       TSIZ_SC_MC_PID_DATA2
#define DWC2_HC_PID_DATA1       TSIZ_SC_MC_PID_DATA1
#define DWC2_HC_PID_MDATA       TSIZ_SC_MC_PID_MDATA
#define DWC2_HC_PID_SETUP       TSIZ_SC_MC_PID_SETUP

        unsigned multi_count:2;

        u8 *xfer_buf;
        dma_addr_t xfer_dma;
        dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
        u8 xfer_started;
        u8 do_ping;
        u8 error_state;
        u8 halt_on_queue;
        u8 halt_pending;
        u8 do_split;
        u8 complete_split;
        u8 hub_addr;
        u8 hub_port;
        u8 xact_pos;
#define DWC2_HCSPLT_XACTPOS_MID         HCSPLT_XACTPOS_MID
#define DWC2_HCSPLT_XACTPOS_END         HCSPLT_XACTPOS_END
#define DWC2_HCSPLT_XACTPOS_BEGIN       HCSPLT_XACTPOS_BEGIN
#define DWC2_HCSPLT_XACTPOS_ALL         HCSPLT_XACTPOS_ALL

        u8 requests;
        u8 schinfo;
        u16 ntd;
        enum dwc2_halt_status halt_status;
        u32 hcint;
        struct dwc2_qh *qh;
        struct list_head hc_list_entry;
        dma_addr_t desc_list_addr;
        u32 desc_list_sz;
        struct list_head split_order_list_entry;
};

struct dwc2_hcd_pipe_info {
        u8 dev_addr;
        u8 ep_num;
        u8 pipe_type;
        u8 pipe_dir;
        u16 maxp;
        u16 maxp_mult;
};

struct dwc2_hcd_iso_packet_desc {
        u32 offset;
        u32 length;
        u32 actual_length;
        u32 status;
};

struct dwc2_qtd;

struct dwc2_hcd_urb {
        void *priv;
        struct dwc2_qtd *qtd;
        void *buf;
        dma_addr_t dma;
        void *setup_packet;
        dma_addr_t setup_dma;
        u32 length;
        u32 actual_length;
        u32 status;
        u32 error_count;
        u32 packet_count;
        u32 flags;
        u16 interval;
        struct dwc2_hcd_pipe_info pipe_info;
        struct dwc2_hcd_iso_packet_desc iso_descs[];
};
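
/*
 * Note: iso_descs[] is a flexible array member, so any allocator of a
 * struct dwc2_hcd_urb must size the allocation for the isochronous
 * descriptor count. A minimal sketch (illustrative only; iso_desc_count is
 * a caller-supplied value, not a field defined here):
 *
 *      urb = kzalloc(struct_size(urb, iso_descs, iso_desc_count), mem_flags);
 */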

/* Phases for control transfers */
enum dwc2_control_phase {
        DWC2_CONTROL_SETUP,
        DWC2_CONTROL_DATA,
        DWC2_CONTROL_STATUS,
};

/* Transaction types */
enum dwc2_transaction_type {
        DWC2_TRANSACTION_NONE,
        DWC2_TRANSACTION_PERIODIC,
        DWC2_TRANSACTION_NON_PERIODIC,
        DWC2_TRANSACTION_ALL,
};

/* The number of elements per LS bitmap (per port on multi_tt) */
#define DWC2_ELEMENTS_PER_LS_BITMAP     DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
                                                     BITS_PER_LONG)

/**
 * struct dwc2_tt - dwc2 data associated with a usb_tt
 *
 * @refcount:           Number of Queue Heads (QHs) holding a reference.
 * @usb_tt:             Pointer back to the official usb_tt.
 * @periodic_bitmaps:   Bitmap for which parts of the 1ms frame are accounted
 *                      for already. Each is DWC2_ELEMENTS_PER_LS_BITMAP
 *                      elements (so sizeof(long) times that in bytes).
 *
 * This structure is stored in the hcpriv of the official usb_tt.
 */
struct dwc2_tt {
        int refcount;
        struct usb_tt *usb_tt;
        unsigned long periodic_bitmaps[];
};
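
/*
 * Note: periodic_bitmaps[] is a flexible array member holding one
 * DWC2_ELEMENTS_PER_LS_BITMAP-element bitmap per TT port, so an allocator
 * is expected to reserve roughly (illustrative sketch; num_ports is a
 * caller-supplied value, 1 unless the hub is multi-TT):
 *
 *      sizeof(struct dwc2_tt) + sizeof(unsigned long) *
 *              DWC2_ELEMENTS_PER_LS_BITMAP * num_ports
 */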

/**
 * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
 *
 * @start_schedule_us:  The start time on the main bus schedule. Note that
 *                      the main bus schedule is tightly packed and this
 *                      time should be interpreted as tightly packed (so
 *                      uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
 *                      instead of 125 us).
 * @duration_us:        How long this transfer takes, in microseconds.
 */
struct dwc2_hs_transfer_time {
        u32 start_schedule_us;
        u16 duration_us;
};
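
/*
 * Worked example: with the tightly packed representation described above, a
 * transfer scheduled in uFrame 3 has start_schedule_us == 300 (3 * 100 us)
 * even though uFrame 3 actually begins 375 us (3 * 125 us) into the frame on
 * the wire.
 */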

/**
 * struct dwc2_qh - Software queue head structure
 *
 * @hsotg:              The HCD state structure for the DWC OTG controller
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL
 *                       - USB_ENDPOINT_XFER_BULK
 *                       - USB_ENDPOINT_XFER_INT
 *                       - USB_ENDPOINT_XFER_ISOC
 * @ep_is_in:           Endpoint direction
 * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
 * @maxp_mult:          Multiplier for maxp
 * @dev_speed:          Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @data_toggle:        Determines the PID of the next data packet for
 *                      non-control transfers. Ignored for control transfers.
 *                      One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @ping_state:         Ping state
 * @do_split:           Full/low speed endpoint on high-speed hub requires split
 * @td_first:           Index of first activated isochronous transfer descriptor
 * @td_last:            Index of last activated isochronous transfer descriptor
 * @host_us:            Bandwidth in microseconds per transfer as seen by host
 * @device_us:          Bandwidth in microseconds per transfer as seen by device
 * @host_interval:      Interval between transfers as seen by the host. If
 *                      the host is high speed and the device is low speed this
 *                      will be 8 times device interval.
 * @device_interval:    Interval between transfers as seen by the device.
 * @next_active_frame:  (Micro)frame _before_ we next need to put something on
 *                      the bus. We'll move the qh to active here. If the
 *                      host is in high speed mode this will be a uframe. If
 *                      the host is in low speed mode this will be a full frame.
 * @start_active_frame: If we are partway through a split transfer, this will be
 *                      what next_active_frame was when we started. Otherwise
 *                      it should always be the same as next_active_frame.
 * @num_hs_transfers:   Number of transfers in hs_transfers.
 *                      Normally this is 1 but can be more than one for splits.
 *                      Always >= 1 unless the host is in low/full speed mode.
 * @hs_transfers:       Transfers that are scheduled as seen by the high speed
 *                      bus. Not used if host is in low or full speed mode (but
 *                      note that it IS USED if the device is low or full speed
 *                      as long as the HOST is in high speed mode).
 * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
 *                      schedule that's being used by this device. This
 *                      will be on the periodic_bitmap in a
 *                      "struct dwc2_tt". Not used if this device is high
 *                      speed. Note that this is in "schedule slice" which
 *                      is tightly packed.
 * @ntd:                Actual number of transfer descriptors in a list
 * @dw_align_buf:       Used instead of original buffer if its physical address
 *                      is not dword-aligned
 * @dw_align_buf_dma:   DMA address for dw_align_buf
 * @qtd_list:           List of QTDs for this QH
 * @channel:            Host channel currently processing transfers for this QH
 * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
 *                      schedule
 * @desc_list:          List of transfer descriptors
 * @desc_list_dma:      Physical address of desc_list
 * @desc_list_sz:       Size of descriptors list
 * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
 *                      descriptor and indicates original XferSize value for the
 *                      descriptor
 * @unreserve_timer:    Timer for releasing periodic reservation.
 * @wait_timer:         Timer used to wait before re-queuing.
 * @dwc_tt:             Pointer to our tt info (or NULL if no tt).
 * @ttport:             Port number within our tt.
 * @tt_buffer_dirty:    True if clear_tt_buffer_complete is pending (the EP's
 *                      TT buffer is not clean).
 * @unreserve_pending:  True if we planned to unreserve but haven't yet.
 * @schedule_low_speed: True if we have a low/full speed component (either the
 *                      host is in low/full speed mode or do_split).
 * @want_wait:          We should wait before re-queuing; only matters for non-
 *                      periodic transfers and is ignored for periodic ones.
 * @wait_timer_cancel:  Set to true to cancel the wait_timer.
 *
 * A Queue Head (QH) holds the static characteristics of an endpoint and
 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
 * be entered in either the non-periodic or periodic schedule.
 */
struct dwc2_qh {
        struct dwc2_hsotg *hsotg;
        u8 ep_type;
        u8 ep_is_in;
        u16 maxp;
        u16 maxp_mult;
        u8 dev_speed;
        u8 data_toggle;
        u8 ping_state;
        u8 do_split;
        u8 td_first;
        u8 td_last;
        u16 host_us;
        u16 device_us;
        u16 host_interval;
        u16 device_interval;
        u16 next_active_frame;
        u16 start_active_frame;
        s16 num_hs_transfers;
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
        u8 *dw_align_buf;
        dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
        struct dwc2_dma_desc *desc_list;
        dma_addr_t desc_list_dma;
        u32 desc_list_sz;
        u32 *n_bytes;
        struct timer_list unreserve_timer;
        struct hrtimer wait_timer;
        struct dwc2_tt *dwc_tt;
        int ttport;
        unsigned tt_buffer_dirty:1;
        unsigned unreserve_pending:1;
        unsigned schedule_low_speed:1;
        unsigned want_wait:1;
        unsigned wait_timer_cancel:1;
};

/**
 * struct dwc2_qtd - Software queue transfer descriptor (QTD)
 *
 * @control_phase:      Current phase for control transfers (Setup, Data, or
 *                      Status)
 * @in_process:         Indicates if this QTD is currently processed by HW
 * @data_toggle:        Determines the PID of the next data packet for the
 *                      data phase of control transfers. Ignored for other
 *                      transfer types. One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @complete_split:     Keeps track of the current split type for FS/LS
 *                      endpoints on a HS Hub
 * @isoc_split_pos:     Position of the ISOC split in full/low speed
 * @isoc_frame_index:   Index of the next frame descriptor for an isochronous
 *                      transfer. A frame descriptor describes the buffer
 *                      position and length of the data to be transferred in the
 *                      next scheduled (micro)frame of an isochronous transfer.
 *                      It also holds status for that transaction. The frame
 *                      index starts at 0.
 * @isoc_split_offset:  Position of the ISOC split in the buffer for the
 *                      current frame
 * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
 * @error_count:        Holds the number of bus errors that have occurred for
 *                      a transaction within this transfer
 * @n_desc:             Number of DMA descriptors for this QTD
 * @isoc_frame_index_last: Last activated frame (packet) index, used in
 *                      descriptor DMA mode only
 * @num_naks:           Number of NAKs received on this QTD.
 * @urb:                URB for this transfer
 * @qh:                 Queue head for this QTD
 * @qtd_list_entry:     For linking to the QH's list of QTDs
 * @isoc_td_first:      Index of first activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 * @isoc_td_last:       Index of last activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 *
 * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
 * interrupt, or isochronous transfer. A single QTD is created for each URB
 * (of one of these types) submitted to the HCD. The transfer associated with
 * a QTD may require one or multiple transactions.
 *
 * A QTD is linked to a Queue Head, which is entered in either the
 * non-periodic or periodic schedule for execution. When a QTD is chosen for
 * execution, some or all of its transactions may be executed. After
 * execution, the state of the QTD is updated. The QTD may be retired if all
 * its transactions are complete or if an error occurred. Otherwise, it
 * remains in the schedule so more transactions can be executed later.
 */
struct dwc2_qtd {
        enum dwc2_control_phase control_phase;
        u8 in_process;
        u8 data_toggle;
        u8 complete_split;
        u8 isoc_split_pos;
        u16 isoc_frame_index;
        u16 isoc_split_offset;
        u16 isoc_td_last;
        u16 isoc_td_first;
        u32 ssplit_out_xfer_count;
        u8 error_count;
        u8 n_desc;
        u16 isoc_frame_index_last;
        u16 num_naks;
        struct dwc2_hcd_urb *urb;
        struct dwc2_qh *qh;
        struct list_head qtd_list_entry;
};

#ifdef DEBUG
struct hc_xfer_info {
        struct dwc2_hsotg *hsotg;
        struct dwc2_host_chan *chan;
};
#endif

u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);

/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
        return (struct usb_hcd *)hsotg->priv;
}

/*
 * Inline used to disable one channel interrupt. Channel interrupts are
 * disabled when the channel is halted or released by the interrupt handler.
 * There is no need to handle further interrupts of that type until the
 * channel is re-assigned. In fact, subsequent handling may cause crashes
 * because the channel structures are cleaned up when the channel is released.
 */
static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
        u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));

        mask &= ~intr;
        dwc2_writel(hsotg, mask, HCINTMSK(chnum));
}
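
/*
 * Example (illustrative only): once the channel-halted interrupt for channel
 * chnum has been serviced, it can be masked with something like
 *
 *      disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
 *
 * where HCINTMSK_CHHLTD is the channel-halted bit from the register
 * definitions.
 */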

void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
                  enum dwc2_halt_status halt_status);
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan);

/*
 * Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
 * are read as 1, they won't clear when written back.
 */
static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
        u32 hprt0 = dwc2_readl(hsotg, HPRT0);

        hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
        return hprt0;
}
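
/*
 * Example (illustrative read-modify-write sketch): asserting port reset
 * without accidentally acknowledging any of the write-1-to-clear status bits:
 *
 *      hprt0 = dwc2_read_hprt0(hsotg);
 *      hprt0 |= HPRT0_RST;
 *      dwc2_writel(hsotg, hprt0, HPRT0);
 */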

static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->ep_num;
}

static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type;
}

static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->maxp;
}

static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->maxp_mult;
}

static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->dev_addr;
}

static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
}

static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
}

static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
}

static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
{
        return pipe->pipe_dir == USB_DIR_IN;
}

static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
{
        return !dwc2_hcd_is_pipe_in(pipe);
}

int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);

/* Transaction Execution Functions */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
                struct dwc2_hsotg *hsotg);
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
                                 enum dwc2_transaction_type tr_type);

/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
                                   struct dwc2_hcd_urb *urb,
                                   gfp_t mem_flags);
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                            int sched_csplit);

void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
                     struct dwc2_qh *qh);

/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
                                                struct dwc2_qtd *qtd,
                                                struct dwc2_qh *qh)
{
        list_del(&qtd->qtd_list_entry);
        kfree(qtd);
}

/* Descriptor DMA support functions */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
                              struct dwc2_qh *qh);
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan, int chnum,
                                 enum dwc2_halt_status halt_status);

int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                          gfp_t mem_flags);
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);

/* Check if QH is non-periodic */
#define dwc2_qh_is_non_per(_qh_ptr_) \
        ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
         (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)

#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
static inline bool dbg_urb(struct urb *urb) { return true; }
static inline bool dbg_perio(void) { return true; }
#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
static inline bool dbg_hc(struct dwc2_host_chan *hc)
{
        return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
               hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_qh(struct dwc2_qh *qh)
{
        return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
               qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_urb(struct urb *urb)
{
        return usb_pipetype(urb->pipe) == PIPE_BULK ||
               usb_pipetype(urb->pipe) == PIPE_CONTROL;
}

static inline bool dbg_perio(void) { return false; }
#endif

/*
 * Returns true if frame1 index is greater than frame2 index. The comparison
 * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
 * frame number when the max index frame number is reached.
 */
static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
{
        u16 diff = fr_idx1 - fr_idx2;
        u16 sign = diff & (FRLISTEN_64_SIZE >> 1);

        return diff && !sign;
}
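
/*
 * Worked example (FRLISTEN_64_SIZE is 64, so the "sign" test checks bit 5 of
 * the difference): dwc2_frame_idx_num_gt(2, 62) returns true because index 2
 * follows index 62 once the frame list wraps, while
 * dwc2_frame_idx_num_gt(62, 2) returns false.
 */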

/*
 * Returns true if frame1 is less than or equal to frame2. The comparison is
 * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
 * frame number when the max frame number is reached.
 */
static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
{
        return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Returns true if frame1 is greater than frame2. The comparison is done
 * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
 * number when the max frame number is reached.
 */
static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
{
        return (frame1 != frame2) &&
               ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Increments frame by the amount specified by inc. The addition is done
 * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
 */
static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
{
        return (frame + inc) & HFNUM_MAX_FRNUM;
}

static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
{
        return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
}

static inline u16 dwc2_full_frame_num(u16 frame)
{
        return (frame & HFNUM_MAX_FRNUM) >> 3;
}

static inline u16 dwc2_micro_frame_num(u16 frame)
{
        return frame & 0x7;
}
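
/*
 * Worked example (HFNUM_MAX_FRNUM is 0x3fff): dwc2_frame_num_inc(0x3fff, 1)
 * wraps to 0 and dwc2_frame_num_dec(0, 1) wraps back to 0x3fff, while
 * dwc2_full_frame_num(0x123) yields full frame 0x24 and
 * dwc2_micro_frame_num(0x123) yields microframe 3. Likewise,
 * dwc2_frame_num_gt(1, 0x3fff) is true because frame 1 comes after frame
 * 0x3fff once the counter rolls over.
 */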

/*
 * Returns the Core Interrupt Status register contents, ANDed with the Core
 * Interrupt Mask register contents
 */
static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
        return dwc2_readl(hsotg, GINTSTS) &
               dwc2_readl(hsotg, GINTMSK);
}

static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->status;
}

static inline u32 dwc2_hcd_urb_get_actual_length(
                struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->actual_length;
}

static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
{
        return dwc2_urb->error_count;
}

static inline void dwc2_hcd_urb_set_iso_desc_params(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
                u32 length)
{
        dwc2_urb->iso_descs[desc_num].offset = offset;
        dwc2_urb->iso_descs[desc_num].length = length;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_status(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
        return dwc2_urb->iso_descs[desc_num].status;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
                struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
        return dwc2_urb->iso_descs[desc_num].actual_length;
}

static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
                                                  struct usb_host_endpoint *ep)
{
        struct dwc2_qh *qh = ep->hcpriv;

        if (qh && !list_empty(&qh->qh_list_entry))
                return 1;

        return 0;
}

static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
                                            struct usb_host_endpoint *ep)
{
        struct dwc2_qh *qh = ep->hcpriv;

        if (!qh) {
                WARN_ON(1);
                return 0;
        }

        return qh->host_us;
}

void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
                               struct dwc2_host_chan *chan, int chnum,
                               struct dwc2_qtd *qtd);

/* HCD Core API */

/**
 * dwc2_handle_hcd_intr() - Called on every hardware interrupt
 *
 * @hsotg: The DWC2 HCD
 *
 * Returns IRQ_HANDLED if the interrupt is handled
 * Returns IRQ_NONE if the interrupt is not handled
 */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
 *
 * @hsotg: The DWC2 HCD
 */
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
 * and 0 otherwise
 *
 * @hsotg: The DWC2 HCD
 */
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_dump_state() - Dumps hsotg state
 *
 * @hsotg: The DWC2 HCD
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);

/* URB interface */

/* Transfer flags */
#define URB_GIVEBACK_ASAP       0x1
#define URB_SEND_ZERO_PACKET    0x2

/* Host driver callbacks */
struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
                                      void *context, gfp_t mem_flags,
                                      int *ttport);

void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
                           struct dwc2_tt *dwc_tt);
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
                        int status);

#endif /* __DWC2_HCD_H__ */