/*
 * Copyright (c) 2001-2002 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __LINUX_EHCI_HCD_H
#define __LINUX_EHCI_HCD_H

/* definitions used for the EHCI driver */

/*
 * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
 * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on
 * the host controller implementation.
 *
 * To facilitate the strongest possible byte-order checking from "sparse"
 * and so on, we use __leXX unless that's not practical.
 */
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
typedef __u32 __bitwise __hc32;
typedef __u16 __bitwise __hc16;
#else
#define __hc32	__le32
#define __hc16	__le16
#endif
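
/*
 * Illustrative sketch (not part of this header's API): fields declared as
 * __hc32 are only ever accessed through the cpu_to_hc32()/hc32_to_cpu()
 * helpers defined later in this file, so the same driver code works with
 * both little- and big-endian controllers:
 *
 *	qtd->hw_token = cpu_to_hc32(ehci, token);
 *	token = hc32_to_cpu(ehci, qtd->hw_token);
 */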

/* statistics can be kept for tuning/monitoring */
struct ehci_stats {
	/* irq usage */
	unsigned long		normal;
	unsigned long		error;
	unsigned long		reclaim;
	unsigned long		lost_iaa;

	/* termination of urbs from core */
	unsigned long		complete;
	unsigned long		unlink;
};

/* ehci_hcd->lock guards shared data against other CPUs:
 *   ehci_hcd:	async, reclaim, periodic (and shadow), ...
 *   usb_host_endpoint: hcpriv
 *   ehci_qh:	qh_next, qtd_list
 *   ehci_qtd:	qtd_list
 *
 * Also, hold this lock when talking to HC registers or
 * when updating hw_* fields in shared qh/qtd/... structures.
 */
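
/*
 * A minimal usage sketch (assumed typical pattern, not a required API):
 * because the lock is also taken from the interrupt handler, other
 * contexts take it with interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&ehci->lock, flags);
 *	... touch ehci->async, qh->qtd_list, or HC registers ...
 *	spin_unlock_irqrestore(&ehci->lock, flags);
 */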

#define	EHCI_MAX_ROOT_PORTS	15		/* see HCS_N_PORTS */

enum ehci_rh_state {
	EHCI_RH_HALTED,
	EHCI_RH_SUSPENDED,
	EHCI_RH_RUNNING
};

struct ehci_hcd {			/* one per controller */
	/* glue to PCI and HCD framework */
	struct ehci_caps __iomem *caps;
	struct ehci_regs __iomem *regs;
	struct ehci_dbg_port __iomem *debug;

	__u32			hcs_params;	/* cached register copy */
	spinlock_t		lock;
	enum ehci_rh_state	rh_state;

	/* async schedule support */
	struct ehci_qh		*async;
	struct ehci_qh		*dummy;		/* For AMD quirk use */
	struct ehci_qh		*reclaim;
	struct ehci_qh		*qh_scan_next;
	unsigned		scanning : 1;

	/* periodic schedule support */
#define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
	unsigned		periodic_size;
	__hc32			*periodic;	/* hw periodic table */
	dma_addr_t		periodic_dma;
	unsigned		i_thresh;	/* uframes HC might cache */

	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
	int			next_uframe;	/* scan periodic, start here */
	unsigned		periodic_sched;	/* periodic activity count */
	unsigned		uframe_periodic_max; /* max periodic time per uframe */


	/* list of itds & sitds completed while clock_frame was still active */
	struct list_head	cached_itd_list;
	struct list_head	cached_sitd_list;
	unsigned		clock_frame;

	/* per root hub port */
	unsigned long		reset_done [EHCI_MAX_ROOT_PORTS];

	/* bit vectors (one bit per port) */
	unsigned long		bus_suspended;		/* which ports were
			already suspended at the start of a bus suspend */
	unsigned long		companion_ports;	/* which ports are
			dedicated to the companion controller */
	unsigned long		owned_ports;		/* which ports are
			owned by the companion during a bus suspend */
	unsigned long		port_c_suspend;		/* which ports have
			the change-suspend feature turned on */
	unsigned long		suspended_ports;	/* which ports are
			suspended */
	unsigned long		resuming_ports;		/* which ports have
			started to resume */

	/* per-HC memory pools (could be per-bus, but ...) */
	struct dma_pool		*qh_pool;	/* qh per active urb */
	struct dma_pool		*qtd_pool;	/* one or more per qh */
	struct dma_pool		*itd_pool;	/* itd per iso urb */
	struct dma_pool		*sitd_pool;	/* sitd per split iso urb */

	struct timer_list	iaa_watchdog;
	struct timer_list	watchdog;
	unsigned long		actions;
	unsigned		periodic_stamp;
	unsigned		random_frame;
	unsigned long		next_statechange;
	ktime_t			last_periodic_enable;
	u32			command;

	/* SILICON QUIRKS */
	unsigned		no_selective_suspend:1;
	unsigned		has_fsl_port_bug:1; /* Freescale */
	unsigned		big_endian_mmio:1;
	unsigned		big_endian_desc:1;
	unsigned		big_endian_capbase:1;
	unsigned		has_amcc_usb23:1;
	unsigned		need_io_watchdog:1;
	unsigned		broken_periodic:1;
	unsigned		amd_pll_fix:1;
	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
	unsigned		has_synopsys_hc_bug:1; /* Synopsys HC */
	unsigned		frame_index_bug:1; /* MosChip (AKA NetMos) */

	/* required for usb32 quirk */
	#define OHCI_CTRL_HCFS          (3 << 6)
	#define OHCI_USB_OPER           (2 << 6)
	#define OHCI_USB_SUSPEND        (3 << 6)

	#define OHCI_HCCTRL_OFFSET      0x4
	#define OHCI_HCCTRL_LEN         0x4
	__hc32			*ohci_hcctrl_reg;
	unsigned		has_hostpc:1;
	unsigned		has_lpm:1;  /* support link power management */
	unsigned		has_ppcd:1; /* support per-port change bits */
	u8			sbrn;		/* packed release number */

	/* irq statistics */
#ifdef EHCI_STATS
	struct ehci_stats	stats;
#	define COUNT(x) do { (x)++; } while (0)
#else
#	define COUNT(x) do {} while (0)
#endif

	/* debug files */
#ifdef DEBUG
	struct dentry		*debug_dir;
#endif
	/*
	 * OTG controllers and transceivers need software interaction
	 */
	struct usb_phy	*transceiver;
};

/* convert between an HCD pointer and the corresponding EHCI_HCD */
static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd)
{
	return (struct ehci_hcd *) (hcd->hcd_priv);
}
static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
{
	return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
}
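
/*
 * Illustrative only: HCD entry points receive a struct usb_hcd and convert
 * it before touching EHCI-private state, e.g.
 *
 *	static irqreturn_t ehci_irq(struct usb_hcd *hcd)
 *	{
 *		struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 *		...
 *	}
 *
 * (the handler body is elided; only the conversion pattern is the point)
 */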


static inline void
iaa_watchdog_start(struct ehci_hcd *ehci)
{
	WARN_ON(timer_pending(&ehci->iaa_watchdog));
	mod_timer(&ehci->iaa_watchdog,
			jiffies + msecs_to_jiffies(EHCI_IAA_MSECS));
}

static inline void iaa_watchdog_done(struct ehci_hcd *ehci)
{
	del_timer(&ehci->iaa_watchdog);
}

enum ehci_timer_action {
	TIMER_IO_WATCHDOG,
	TIMER_ASYNC_SHRINK,
	TIMER_ASYNC_OFF,
};

static inline void
timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
{
	clear_bit (action, &ehci->actions);
}

static void free_cached_lists(struct ehci_hcd *ehci);

/*-------------------------------------------------------------------------*/

#include <linux/usb/ehci_def.h>

/*-------------------------------------------------------------------------*/

#define	QTD_NEXT(ehci, dma)	cpu_to_hc32(ehci, (u32)dma)

/*
 * EHCI Specification 0.95 Section 3.5
 * QTD: describe data transfer components (buffer, direction, ...)
 * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
 *
 * These are associated only with "QH" (Queue Head) structures,
 * used with control, bulk, and interrupt transfers.
 */
struct ehci_qtd {
	/* first part defined by EHCI spec */
	__hc32			hw_next;	/* see EHCI 3.5.1 */
	__hc32			hw_alt_next;    /* see EHCI 3.5.2 */
	__hc32			hw_token;       /* see EHCI 3.5.3 */
#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
#define	QTD_STS_PING	(1 << 0)	/* issue PING? */

#define ACTIVE_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_ACTIVE)
#define HALT_BIT(ehci)		cpu_to_hc32(ehci, QTD_STS_HALT)
#define STATUS_BIT(ehci)	cpu_to_hc32(ehci, QTD_STS_STS)

	__hc32			hw_buf [5];        /* see EHCI 3.5.4 */
	__hc32			hw_buf_hi [5];        /* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		qtd_dma;		/* qtd address */
	struct list_head	qtd_list;		/* sw qtd list */
	struct urb		*urb;			/* qtd's urb */
	size_t			length;			/* length of buffer */
} __attribute__ ((aligned (32)));

/* mask NakCnt+T in qh->hw_alt_next */
#define QTD_MASK(ehci)	cpu_to_hc32 (ehci, ~0x1f)

#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
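
/*
 * Decoding sketch (illustrative, assuming "token" comes from a completed
 * qtd): the status bits and field extractors above apply to the CPU-endian
 * value, never to the raw __hc32:
 *
 *	u32 token = hc32_to_cpu(ehci, qtd->hw_token);
 *
 *	if (token & QTD_STS_HALT)
 *		... transfer halted on error ...
 *	else if (IS_SHORT_READ(token))
 *		... device sent fewer than qtd->length bytes ...
 */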

/*-------------------------------------------------------------------------*/

/* type tag from {qh,itd,sitd,fstn}->hw_next */
#define Q_NEXT_TYPE(ehci,dma)	((dma) & cpu_to_hc32(ehci, 3 << 1))

/*
 * The following defines are no longer converted with the cpu_to_le32()
 * macro, because the driver must support switching between big- and
 * little-endian descriptors at run time: the same driver may serve an SoC
 * EHCI controller using big-endian descriptors as well as a normal
 * little-endian PCI EHCI controller in one system.
 */
/* values for that type tag */
#define Q_TYPE_ITD	(0 << 1)
#define Q_TYPE_QH	(1 << 1)
#define Q_TYPE_SITD	(2 << 1)
#define Q_TYPE_FSTN	(3 << 1)

/* next async queue entry, or pointer to interrupt/periodic QH */
#define QH_NEXT(ehci,dma)	(cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH))

/* for periodic/async schedules and qtd lists, mark end of list */
#define EHCI_LIST_END(ehci)	cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
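
/*
 * Linking sketch (illustrative only): a horizontal link written into
 * hardware combines the 32-byte-aligned DMA address of the next element
 * with its type tag, and a schedule slot with no entry gets the
 * "terminate" marker instead:
 *
 *	qh->hw->hw_next = QH_NEXT(ehci, next_qh->qh_dma);
 *	ehci->periodic[frame] = EHCI_LIST_END(ehci);
 */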

/*
 * Entries in periodic shadow table are pointers to one of four kinds
 * of data structure.  That's dictated by the hardware; a type tag is
 * encoded in the low bits of the hardware's periodic schedule.  Use
 * Q_NEXT_TYPE to get the tag.
 *
 * For entries in the async schedule, the type tag always says "qh".
 */
union ehci_shadow {
	struct ehci_qh		*qh;		/* Q_TYPE_QH */
	struct ehci_itd		*itd;		/* Q_TYPE_ITD */
	struct ehci_sitd	*sitd;		/* Q_TYPE_SITD */
	struct ehci_fstn	*fstn;		/* Q_TYPE_FSTN */
	__hc32			*hw_next;	/* (all types) */
	void			*ptr;
};
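
/*
 * Traversal sketch (illustrative, simplified from the driver's real scan
 * loops): walking one frame of the shadow table means re-checking the
 * type tag of the corresponding hardware entry at every hop, since each
 * element type keeps its "next" pointer in a differently named field:
 *
 *	union ehci_shadow *prev = &ehci->pshadow[frame];
 *	__hc32 *hw_p = &ehci->periodic[frame];
 *
 *	while (prev->ptr) {
 *		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
 *		case Q_TYPE_QH:
 *			hw_p = &prev->qh->hw->hw_next;
 *			prev = &prev->qh->qh_next;
 *			break;
 *		... similar cases for itd/sitd/fstn ...
 *		}
 *	}
 */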

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.95 Section 3.6
 * QH: describes control/bulk/interrupt endpoints
 * See Fig 3-7 "Queue Head Structure Layout".
 *
 * These appear in both the async and (for interrupt) periodic schedules.
 */

/* first part defined by EHCI spec */
struct ehci_qh_hw {
	__hc32			hw_next;	/* see EHCI 3.6.1 */
	__hc32			hw_info1;       /* see EHCI 3.6.2 */
#define	QH_HEAD		0x00008000
	__hc32			hw_info2;        /* see EHCI 3.6.2 */
#define	QH_SMASK	0x000000ff
#define	QH_CMASK	0x0000ff00
#define	QH_HUBADDR	0x007f0000
#define	QH_HUBPORT	0x3f800000
#define	QH_MULT		0xc0000000
	__hc32			hw_current;	/* qtd list - see EHCI 3.6.4 */

	/* qtd overlay (hardware parts of a struct ehci_qtd) */
	__hc32			hw_qtd_next;
	__hc32			hw_alt_next;
	__hc32			hw_token;
	__hc32			hw_buf [5];
	__hc32			hw_buf_hi [5];
} __attribute__ ((aligned(32)));

struct ehci_qh {
	struct ehci_qh_hw	*hw;
	/* the rest is HCD-private */
	dma_addr_t		qh_dma;		/* address of qh */
	union ehci_shadow	qh_next;	/* ptr to qh; or periodic */
	struct list_head	qtd_list;	/* sw qtd list */
	struct ehci_qtd		*dummy;
	struct ehci_qh		*reclaim;	/* next to reclaim */

	struct ehci_hcd		*ehci;
	unsigned long		unlink_time;

	/*
	 * Do NOT use atomic operations for QH refcounting. On some CPUs
	 * (PPC7448 for example), atomic operations cannot be performed on
	 * memory that is cache-inhibited (i.e. being used for DMA).
	 * Spinlocks are used to protect all QH fields.
	 */
	u32			refcount;
	unsigned		stamp;

	u8			needs_rescan;	/* Dequeue during giveback */
	u8			qh_state;
#define	QH_STATE_LINKED		1		/* HC sees this */
#define	QH_STATE_UNLINK		2		/* HC may still see this */
#define	QH_STATE_IDLE		3		/* HC doesn't see this */
#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on reclaim q */
#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */

	u8			xacterrs;	/* XactErr retry counter */
#define	QH_XACTERR_MAX		32		/* XactErr retry limit */

	/* periodic schedule info */
	u8			usecs;		/* intr bandwidth */
	u8			gap_uf;		/* uframes split/csplit gap */
	u8			c_usecs;	/* ... split completion bw */
	u16			tt_usecs;	/* tt downstream bandwidth */
	unsigned short		period;		/* polling interval */
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)			/* pick new start */

	struct usb_device	*dev;		/* access to TT */
	unsigned		is_out:1;	/* bulk or intr OUT */
	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
};

/*-------------------------------------------------------------------------*/

/* description of one iso transaction (up to 3 KB data if highspeed) */
struct ehci_iso_packet {
	/* These will be copied to iTD when scheduling */
	u64			bufp;		/* itd->hw_bufp{,_hi}[pg] |= */
	__hc32			transaction;	/* itd->hw_transaction[i] |= */
	u8			cross;		/* buf crosses pages */
	/* for full speed OUT splits */
	u32			buf1;
};

/* temporary schedule data for packets from iso urbs (both speeds);
 * each packet is one logical usb transaction to the device (not TT),
 * beginning at stream->next_uframe
 */
struct ehci_iso_sched {
	struct list_head	td_list;
	unsigned		span;
	struct ehci_iso_packet	packet [0];
};
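
/*
 * Allocation sketch (illustrative; the driver's real helper lives in
 * ehci-sched.c): packet[0] is a zero-length trailing array, so a single
 * allocation is sized for the urb's packet count:
 *
 *	struct ehci_iso_sched *sched;
 *
 *	sched = kzalloc(sizeof(*sched)
 *			+ packets * sizeof(struct ehci_iso_packet),
 *			mem_flags);
 *	if (sched)
 *		INIT_LIST_HEAD(&sched->td_list);
 */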

/*
 * ehci_iso_stream - groups all (s)itds for this endpoint.
 * acts like a qh would, if EHCI had them for ISO.
 */
struct ehci_iso_stream {
	/* first field matches ehci_qh, but is NULL */
	struct ehci_qh_hw	*hw;

	u32			refcount;
	u8			bEndpointAddress;
	u8			highspeed;
	struct list_head	td_list;	/* queued itds/sitds */
	struct list_head	free_list;	/* list of unused itds/sitds */
	struct usb_device	*udev;
	struct usb_host_endpoint *ep;

	/* output of (re)scheduling */
	int			next_uframe;
	__hc32			splits;

	/* the rest is derived from the endpoint descriptor,
	 * trusting urb->interval == f(epdesc->bInterval) and
	 * including the extra info for hw_bufp[0..2]
	 */
	u8			usecs, c_usecs;
	u16			interval;
	u16			tt_usecs;
	u16			maxp;
	u16			raw_mask;
	unsigned		bandwidth;

	/* This is used to initialize iTD's hw_bufp fields */
	__hc32			buf0;
	__hc32			buf1;
	__hc32			buf2;

	/* this is used to initialize sITD's tt info */
	__hc32			address;
};

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.95 Section 3.3
 * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
 *
 * Schedule records for high speed iso xfers
 */
struct ehci_itd {
	/* first part defined by EHCI spec */
	__hc32			hw_next;           /* see EHCI 3.3.1 */
	__hc32			hw_transaction [8]; /* see EHCI 3.3.2 */
#define EHCI_ISOC_ACTIVE        (1<<31)        /* activate transfer this slot */
#define EHCI_ISOC_BUF_ERR       (1<<30)        /* Data buffer error */
#define EHCI_ISOC_BABBLE        (1<<29)        /* babble detected */
#define EHCI_ISOC_XACTERR       (1<<28)        /* XactErr - transaction error */
#define	EHCI_ITD_LENGTH(tok)	(((tok)>>16) & 0x0fff)
#define	EHCI_ITD_IOC		(1 << 15)	/* interrupt on complete */

#define ITD_ACTIVE(ehci)	cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)

	__hc32			hw_bufp [7];	/* see EHCI 3.3.3 */
	__hc32			hw_bufp_hi [7];	/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		itd_dma;	/* for this itd */
	union ehci_shadow	itd_next;	/* ptr to periodic q entry */

	struct urb		*urb;
	struct ehci_iso_stream	*stream;	/* endpoint's queue */
	struct list_head	itd_list;	/* list of stream's itds */

	/* any/all hw_transactions here may be used by that urb */
	unsigned		frame;		/* where scheduled */
	unsigned		pg;
	unsigned		index[8];	/* in urb->iso_frame_desc */
} __attribute__ ((aligned (32)));

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.95 Section 3.4
 * siTD, aka split-transaction isochronous Transfer Descriptor
 *       ... describe full speed iso xfers through TT in hubs
 * see Figure 3-5 "Split-transaction Isochronous Transaction Descriptor (siTD)"
 */
struct ehci_sitd {
	/* first part defined by EHCI spec */
	__hc32			hw_next;
/* uses bit field macros above - see EHCI 0.95 Table 3-8 */
	__hc32			hw_fullspeed_ep;	/* EHCI table 3-9 */
	__hc32			hw_uframe;		/* EHCI table 3-10 */
	__hc32			hw_results;		/* EHCI table 3-11 */
#define	SITD_IOC	(1 << 31)	/* interrupt on completion */
#define	SITD_PAGE	(1 << 30)	/* buffer 0/1 */
#define	SITD_LENGTH(x)	(0x3ff & ((x)>>16))
#define	SITD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	SITD_STS_ERR	(1 << 6)	/* error from TT */
#define	SITD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	SITD_STS_BABBLE	(1 << 4)	/* device was babbling */
#define	SITD_STS_XACT	(1 << 3)	/* illegal IN response */
#define	SITD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	SITD_STS_STS	(1 << 1)	/* split transaction state */

#define SITD_ACTIVE(ehci)	cpu_to_hc32(ehci, SITD_STS_ACTIVE)

	__hc32			hw_buf [2];		/* EHCI table 3-12 */
	__hc32			hw_backpointer;		/* EHCI table 3-13 */
	__hc32			hw_buf_hi [2];		/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		sitd_dma;
	union ehci_shadow	sitd_next;	/* ptr to periodic q entry */

	struct urb		*urb;
	struct ehci_iso_stream	*stream;	/* endpoint's queue */
	struct list_head	sitd_list;	/* list of stream's sitds */
	unsigned		frame;
	unsigned		index;
} __attribute__ ((aligned (32)));

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.96 Section 3.7
 * Periodic Frame Span Traversal Node (FSTN)
 *
 * Manages split interrupt transactions (using TT) that span frame boundaries
 * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
 * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
 */
struct ehci_fstn {
	__hc32			hw_next;	/* any periodic q entry */
	__hc32			hw_prev;	/* qh or EHCI_LIST_END */

	/* the rest is HCD-private */
	dma_addr_t		fstn_dma;
	union ehci_shadow	fstn_next;	/* ptr to periodic q entry */
} __attribute__ ((aligned (32)));

/*-------------------------------------------------------------------------*/

/* Prepare the PORTSC wakeup flags during controller suspend/resume */

#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup)	\
		ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);

#define ehci_prepare_ports_for_controller_resume(ehci)			\
		ehci_adjust_port_wakeup_flags(ehci, false, false);

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT

/*
 * Some EHCI controllers have a Transaction Translator built into the
 * root hub. This is a non-standard feature.  Each controller will need
 * to add code to the following inline functions, and call them as
 * needed (mostly in root hub code).
 */

#define	ehci_is_TDI(e)			(ehci_to_hcd(e)->has_tt)

/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int
ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
{
	if (ehci_is_TDI(ehci)) {
		switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) {
		case 0:
			return 0;
		case 1:
			return USB_PORT_STAT_LOW_SPEED;
		case 2:
		default:
			return USB_PORT_STAT_HIGH_SPEED;
		}
	}
	return USB_PORT_STAT_HIGH_SPEED;
}

#else

#define	ehci_is_TDI(e)			(0)

#define	ehci_port_speed(ehci, portsc)	USB_PORT_STAT_HIGH_SPEED
#endif

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_PPC_83xx
/* Some Freescale processors have an erratum in which the TT
 * port number in the queue head was 0..N-1 instead of 1..N.
 */
#define	ehci_has_fsl_portno_bug(e)		((e)->has_fsl_port_bug)
#else
#define	ehci_has_fsl_portno_bug(e)		(0)
#endif

/*
 * While most USB host controllers implement their registers in
 * little-endian format, a minority (celleb companion chip) implement
 * them in big endian format.
 *
 * This attempts to support either format at compile time without a
 * runtime penalty, or both formats with the additional overhead
 * of checking a flag bit.
 *
 * ehci_big_endian_capbase is a special quirk for controllers that
 * implement the HC capability registers as separate registers and not
 * as fields of a 32-bit register.
 */

#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
#define ehci_big_endian_mmio(e)		((e)->big_endian_mmio)
#define ehci_big_endian_capbase(e)	((e)->big_endian_capbase)
#else
#define ehci_big_endian_mmio(e)		0
#define ehci_big_endian_capbase(e)	0
#endif

/*
 * Big-endian read/write functions are arch-specific.
 * Other arches can be added if/when they're needed.
 */
#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_IXP4XX)
#define readl_be(addr)		__raw_readl((__force unsigned *)addr)
#define writel_be(val, addr)	__raw_writel(val, (__force unsigned *)addr)
#endif

static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
		__u32 __iomem * regs)
{
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
	return ehci_big_endian_mmio(ehci) ?
		readl_be(regs) :
		readl(regs);
#else
	return readl(regs);
#endif
}

static inline void ehci_writel(const struct ehci_hcd *ehci,
		const unsigned int val, __u32 __iomem *regs)
{
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
	ehci_big_endian_mmio(ehci) ?
		writel_be(val, regs) :
		writel(val, regs);
#else
	writel(val, regs);
#endif
}
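
/*
 * Usage sketch (illustrative): register accesses always go through these
 * wrappers rather than plain readl()/writel(), so the big-endian-MMIO
 * quirk is handled in one place; for example, polling one root hub port:
 *
 *	u32 portsc = ehci_readl(ehci, &ehci->regs->port_status[port]);
 *
 *	if (portsc & PORT_CONNECT)
 *		... a device is attached ...
 */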

/*
 * On certain ppc-44x SoCs there is a hardware issue that can only be worked
 * around by explicitly putting the companion OHCI into suspend or operational
 * state. This function therefore only makes sense on that arch. Other common
 * bits are dependent on the has_amcc_usb23 quirk flag.
 */
#ifdef CONFIG_44x
static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
{
	u32 hc_control;

	hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS);
	if (operational)
		hc_control |= OHCI_USB_OPER;
	else
		hc_control |= OHCI_USB_SUSPEND;

	writel_be(hc_control, ehci->ohci_hcctrl_reg);
	(void) readl_be(ehci->ohci_hcctrl_reg);
}
#else
static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
{ }
#endif

/*-------------------------------------------------------------------------*/

/*
 * The AMCC 440EPx not only implements its EHCI registers in big-endian
 * format, but also its DMA data structures (descriptors).
 *
 * EHCI controllers accessed through PCI work normally (little-endian
 * everywhere), so we won't bother supporting a BE-only mode for now.
 */
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC
#define ehci_big_endian_desc(e)		((e)->big_endian_desc)

/* cpu to ehci */
static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
{
	return ehci_big_endian_desc(ehci)
		? (__force __hc32)cpu_to_be32(x)
		: (__force __hc32)cpu_to_le32(x);
}

/* ehci to cpu */
static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
{
	return ehci_big_endian_desc(ehci)
		? be32_to_cpu((__force __be32)x)
		: le32_to_cpu((__force __le32)x);
}

static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
{
	return ehci_big_endian_desc(ehci)
		? be32_to_cpup((__force __be32 *)x)
		: le32_to_cpup((__force __le32 *)x);
}

#else

/* cpu to ehci */
static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
{
	return cpu_to_le32(x);
}

/* ehci to cpu */
static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
{
	return le32_to_cpu(x);
}

static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
{
	return le32_to_cpup(x);
}

#endif

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_PCI

/* For working around the MosChip frame-index-register bug */
static unsigned ehci_read_frame_index(struct ehci_hcd *ehci);

#else

static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
{
	return ehci_readl(ehci, &ehci->regs->frame_index);
}

#endif

/*-------------------------------------------------------------------------*/

#ifndef DEBUG
#define STUB_DEBUG_FILES
#endif	/* DEBUG */

/*-------------------------------------------------------------------------*/

#endif /* __LINUX_EHCI_HCD_H */