/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

/*-------------------------------------------------------------------------*/

/*
 * OHCI deals with three types of memory:
 *	- data used only by the HCD ... kmalloc is fine
 *	- async and periodic schedules, shared by HC and HCD ... these
 *	  need to use dma_pool or dma_alloc_coherent
 *	- driver buffers, read/written by HC ... the hcd glue or the
 *	  device driver provides us with dma addresses
 *
 * There's also "register" data, which is memory mapped.
 * No memory seen by this driver (or any HCD) may be paged out.
 */

/*-------------------------------------------------------------------------*/

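/* one-time setup of HCD-private software state (lock, pending list, etc) */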
25
static void ohci_hcd_init (struct ohci_hcd *ohci)
{
        ohci->next_statechange = jiffies;
        spin_lock_init (&ohci->lock);
        INIT_LIST_HEAD (&ohci->pending);
}

/*-------------------------------------------------------------------------*/

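/*
 * Create the dma_pools backing TDs and EDs:  schedule data shared
 * with the controller (the second memory category noted above).
 * dma_pool_alloc() hands back both a kernel virtual pointer and the
 * dma (bus) address the HC uses to reach the same memory.
 */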
static int ohci_mem_init (struct ohci_hcd *ohci)
{
        ohci->td_cache = dma_pool_create ("ohci_td",
                ohci_to_hcd(ohci)->self.controller,
                sizeof (struct td),
                32 /* byte alignment */,
                0 /* no page-crossing issues */);
        if (!ohci->td_cache)
                return -ENOMEM;
        ohci->ed_cache = dma_pool_create ("ohci_ed",
                ohci_to_hcd(ohci)->self.controller,
                sizeof (struct ed),
                16 /* byte alignment */,
                0 /* no page-crossing issues */);
        if (!ohci->ed_cache) {
                dma_pool_destroy (ohci->td_cache);
                return -ENOMEM;
        }
        return 0;
}

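/* tear down the dma_pools created by ohci_mem_init() */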
static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
        if (ohci->td_cache) {
                dma_pool_destroy (ohci->td_cache);
                ohci->td_cache = NULL;
        }
        if (ohci->ed_cache) {
                dma_pool_destroy (ohci->ed_cache);
                ohci->ed_cache = NULL;
        }
}

/*-------------------------------------------------------------------------*/

/* ohci "done list" processing needs this mapping */
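/* (the done list reports bus addresses; td_hash maps them back to struct td) */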
static inline struct td *
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
{
        struct td *td;

        td_dma &= TD_MASK;
        td = hc->td_hash [TD_HASH_FUNC(td_dma)];
        while (td && td->td_dma != td_dma)
                td = td->td_hash;
        return td;
}

/* TDs ... */
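/* allocate a TD from the pool; td_fill() hashes it once it's queued */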
static struct td *
td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
        dma_addr_t dma;
        struct td *td;

        td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
        if (td) {
                /* in case hc fetches it, make it look dead */
                memset (td, 0, sizeof *td);
                td->hwNextTD = cpu_to_hc32 (hc, dma);
                td->td_dma = dma;
                /* hashed in td_fill */
        }
        return td;
}

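/*
 * unhash a TD and return it to the pool; a TD the HC has completed
 * (TD_DONE) should always be found in the hash
 */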
static void
td_free (struct ohci_hcd *hc, struct td *td)
{
        struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];

        while (*prev && *prev != td)
                prev = &(*prev)->td_hash;
        if (*prev)
                *prev = td->td_hash;
        else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
                ohci_dbg (hc, "no hash for td %p\n", td);
        dma_pool_free (hc->td_cache, td, td->td_dma);
}

/*-------------------------------------------------------------------------*/

/* EDs ... */
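/* allocate an ED from the pool, with an empty TD queue */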
static struct ed *
ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
        dma_addr_t dma;
        struct ed *ed;

        ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
        if (ed) {
                memset (ed, 0, sizeof (*ed));
                INIT_LIST_HEAD (&ed->td_list);
                ed->dma = dma;
        }
        return ed;
}

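/* return an ED to the pool; callers ensure the HC is done with it */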
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
        dma_pool_free (hc->ed_cache, ed, ed->dma);
}
