/*
 * DMA helper routines for Freescale STMP37XX/STMP378X
 *
 * Author: dmitry pervushin <dpervushin@embeddedalley.com>
 *
 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

/*
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/sysdev.h>
#include <linux/cpufreq.h>

#include <asm/page.h>

#include <mach/platform.h>
#include <mach/dma.h>
#include <mach/regs-apbx.h>
#include <mach/regs-apbh.h>
static const size_t pool_item_size = sizeof(struct stmp3xxx_dma_command);
static const size_t pool_alignment = 8;
static struct stmp3xxx_dma_user {
        void *pool;
        int inuse;
        const char *name;
} channels[MAX_DMA_CHANNELS];

#define IS_VALID_CHANNEL(ch) ((ch) >= 0 && (ch) < MAX_DMA_CHANNELS)
#define IS_USED(ch) (channels[ch].inuse)

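/*
 * stmp3xxx_dma_request - claim DMA channel @ch for exclusive use and create
 * the dma_pool that its commands will be allocated from.  Returns 0 on
 * success, -ENODEV for an invalid channel, -EBUSY if the channel is already
 * claimed, or -ENOMEM if the pool cannot be created.
 */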
int stmp3xxx_dma_request(int ch, struct device *dev, const char *name)
{
        struct stmp3xxx_dma_user *user;
        int err = 0;

        if (!IS_VALID_CHANNEL(ch)) {
                err = -ENODEV;
                goto out;
        }
        if (IS_USED(ch)) {
                err = -EBUSY;
                goto out;
        }
        user = channels + ch;
        /* Create a pool to allocate dma commands from */
        user->pool = dma_pool_create(name, dev, pool_item_size,
                                     pool_alignment, PAGE_SIZE);
        if (user->pool == NULL) {
                err = -ENOMEM;
                goto out;
        }
        user->name = name;
        user->inuse++;
out:
        return err;
}
EXPORT_SYMBOL(stmp3xxx_dma_request);

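/*
 * stmp3xxx_dma_release - release a channel previously claimed with
 * stmp3xxx_dma_request() and destroy its command pool.  All commands
 * allocated from the pool must have been freed beforehand.
 */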
int stmp3xxx_dma_release(int ch)
{
        struct stmp3xxx_dma_user *user = channels + ch;
        int err = 0;

        if (!IS_VALID_CHANNEL(ch)) {
                err = -ENODEV;
                goto out;
        }
        if (!IS_USED(ch)) {
                err = -EBUSY;
                goto out;
        }
        BUG_ON(user->pool == NULL);
        dma_pool_destroy(user->pool);
        user->inuse--;
out:
        return err;
}
EXPORT_SYMBOL(stmp3xxx_dma_release);

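/*
 * stmp3xxx_dma_read_semaphore - return the current value of the hardware
 * semaphore counter (the PHORE field of the channel's SEMA register) for
 * the APBH or APBX channel encoded in @channel.
 */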
int stmp3xxx_dma_read_semaphore(int channel)
{
        int sem = -1;

        switch (STMP3XXX_DMA_BUS(channel)) {
        case STMP3XXX_BUS_APBH:
                sem = __raw_readl(REGS_APBH_BASE + HW_APBH_CHn_SEMA +
                                  STMP3XXX_DMA_CHANNEL(channel) * 0x70);
                sem &= BM_APBH_CHn_SEMA_PHORE;
                sem >>= BP_APBH_CHn_SEMA_PHORE;
                break;

        case STMP3XXX_BUS_APBX:
                sem = __raw_readl(REGS_APBX_BASE + HW_APBX_CHn_SEMA +
                                  STMP3XXX_DMA_CHANNEL(channel) * 0x70);
                sem &= BM_APBX_CHn_SEMA_PHORE;
                sem >>= BP_APBX_CHn_SEMA_PHORE;
                break;
        default:
                BUG();
        }
        return sem;
}
EXPORT_SYMBOL(stmp3xxx_dma_read_semaphore);

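/*
 * stmp3xxx_dma_allocate_command - allocate and zero one DMA command from
 * the channel's pool, filling in @descriptor->command (virtual address)
 * and @descriptor->handle (bus address).
 */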
int stmp3xxx_dma_allocate_command(int channel,
                                  struct stmp3xxx_dma_descriptor *descriptor)
{
        struct stmp3xxx_dma_user *user = channels + channel;
        int err = 0;

        if (!IS_VALID_CHANNEL(channel)) {
                err = -ENODEV;
                goto out;
        }
        if (!IS_USED(channel)) {
                err = -EBUSY;
                goto out;
        }
        if (descriptor == NULL) {
                err = -EINVAL;
                goto out;
        }

        /* Allocate memory for a command from the buffer */
        descriptor->command =
                dma_pool_alloc(user->pool, GFP_KERNEL, &descriptor->handle);

        /* Check it worked */
        if (!descriptor->command) {
                err = -ENOMEM;
                goto out;
        }

        memset(descriptor->command, 0, pool_item_size);
out:
        WARN_ON(err);
        return err;
}
EXPORT_SYMBOL(stmp3xxx_dma_allocate_command);

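/*
 * stmp3xxx_dma_free_command - return a command previously obtained from
 * stmp3xxx_dma_allocate_command() to the channel's pool and clear the
 * descriptor so it cannot be reused by accident.
 */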
int stmp3xxx_dma_free_command(int channel,
                              struct stmp3xxx_dma_descriptor *descriptor)
{
        int err = 0;

        if (!IS_VALID_CHANNEL(channel)) {
                err = -ENODEV;
                goto out;
        }
        if (!IS_USED(channel)) {
                err = -EBUSY;
                goto out;
        }

        /* Return the command memory to the pool */
        dma_pool_free(channels[channel].pool, descriptor->command,
                      descriptor->handle);

        /* Initialise descriptor so we're not tempted to use it */
        descriptor->command = NULL;
        descriptor->handle = 0;
        descriptor->virtual_buf_ptr = NULL;
        descriptor->next_descr = NULL;

out:
        WARN_ON(err);
        return err;
}
EXPORT_SYMBOL(stmp3xxx_dma_free_command);

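/*
 * stmp3xxx_dma_go - point the channel at the first command of a chain and
 * write the counting semaphore, which starts the transfer.  The peripheral
 * side must already be configured.
 *
 * Illustrative call sequence (sketch only; the channel number, device
 * pointer and command contents are hypothetical and peripheral specific):
 *
 *      stmp3xxx_dma_request(channel, dev, "mydev");
 *      stmp3xxx_dma_allocate_command(channel, &desc);
 *      ... fill in the fields of desc.command ...
 *      stmp3xxx_dma_go(channel, &desc, 1);
 */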
void stmp3xxx_dma_go(int channel,
                     struct stmp3xxx_dma_descriptor *head, u32 semaphore)
{
        int ch = STMP3XXX_DMA_CHANNEL(channel);
        void __iomem *c, *s;

        switch (STMP3XXX_DMA_BUS(channel)) {
        case STMP3XXX_BUS_APBH:
                c = REGS_APBH_BASE + HW_APBH_CHn_NXTCMDAR + 0x70 * ch;
                s = REGS_APBH_BASE + HW_APBH_CHn_SEMA + 0x70 * ch;
                break;

        case STMP3XXX_BUS_APBX:
                c = REGS_APBX_BASE + HW_APBX_CHn_NXTCMDAR + 0x70 * ch;
                s = REGS_APBX_BASE + HW_APBX_CHn_SEMA + 0x70 * ch;
                break;

        default:
                return;
        }

        /* Set next command */
        __raw_writel(head->handle, c);
        /* Set counting semaphore (kicks off transfer).  Assumes
           peripheral has been set up correctly */
        __raw_writel(semaphore, s);
}
EXPORT_SYMBOL(stmp3xxx_dma_go);

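/*
 * stmp3xxx_dma_running - return non-zero while the channel's hardware
 * semaphore is non-zero, i.e. while the channel still has commands to
 * process.
 */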
int stmp3xxx_dma_running(int channel)
{
        switch (STMP3XXX_DMA_BUS(channel)) {
        case STMP3XXX_BUS_APBH:
                return __raw_readl(REGS_APBH_BASE + HW_APBH_CHn_SEMA +
                                   0x70 * STMP3XXX_DMA_CHANNEL(channel)) &
                        BM_APBH_CHn_SEMA_PHORE;

        case STMP3XXX_BUS_APBX:
                return __raw_readl(REGS_APBX_BASE + HW_APBX_CHn_SEMA +
                                   0x70 * STMP3XXX_DMA_CHANNEL(channel)) &
                        BM_APBX_CHn_SEMA_PHORE;
        default:
                BUG();
                return 0;
        }
}
EXPORT_SYMBOL(stmp3xxx_dma_running);

/*
 * Circular dma chain management
 */
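/*
 * stmp3xxx_dma_free_chain - release every command that was allocated for
 * the circular chain by stmp3xxx_dma_make_chain().
 */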
void stmp3xxx_dma_free_chain(struct stmp37xx_circ_dma_chain *chain)
{
        int i;

        for (i = 0; i < chain->total_count; i++)
                stmp3xxx_dma_free_command(
                        STMP3XXX_DMA(chain->channel, chain->bus),
                        &chain->chain[i]);
}
EXPORT_SYMBOL(stmp3xxx_dma_free_chain);

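/*
 * stmp3xxx_dma_make_chain - allocate @items DMA commands into the caller's
 * @descriptors array, link them into a circular list and initialise the
 * book-keeping fields of @chain.  On failure everything already allocated
 * is freed again and the error is returned.
 */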
int stmp3xxx_dma_make_chain(int ch, struct stmp37xx_circ_dma_chain *chain,
                            struct stmp3xxx_dma_descriptor descriptors[],
                            unsigned items)
{
        int i;
        int err = 0;

        if (items == 0)
                return err;

        for (i = 0; i < items; i++) {
                err = stmp3xxx_dma_allocate_command(ch, &descriptors[i]);
                if (err) {
                        WARN_ON(err);
                        /*
                         * Couldn't allocate the whole chain.
                         * Deallocate what has been allocated so far;
                         * descriptors[i] itself was never allocated.
                         */
                        while (--i >= 0)
                                stmp3xxx_dma_free_command(ch,
                                                          &descriptors[i]);
                        return err;
                }

                /* link them! */
                if (i > 0) {
                        descriptors[i - 1].next_descr = &descriptors[i];
                        descriptors[i - 1].command->next =
                                descriptors[i].handle;
                }
        }

        /* make list circular */
        descriptors[items - 1].next_descr = &descriptors[0];
        descriptors[items - 1].command->next = descriptors[0].handle;

        chain->total_count = items;
        chain->chain = descriptors;
        chain->free_index = 0;
        chain->active_index = 0;
        chain->cooked_index = 0;
        chain->free_count = items;
        chain->active_count = 0;
        chain->cooked_count = 0;
        chain->bus = STMP3XXX_DMA_BUS(ch);
        chain->channel = STMP3XXX_DMA_CHANNEL(ch);
        return err;
}
EXPORT_SYMBOL(stmp3xxx_dma_make_chain);

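/*
 * stmp37xx_circ_clear_chain - reset the chain's counters and indices to
 * the "all free" state.  Must not be called while the channel is running.
 */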
void stmp37xx_circ_clear_chain(struct stmp37xx_circ_dma_chain *chain)
{
        BUG_ON(stmp3xxx_dma_running(STMP3XXX_DMA(chain->channel, chain->bus)));
        chain->free_index = 0;
        chain->active_index = 0;
        chain->cooked_index = 0;
        chain->free_count = chain->total_count;
        chain->active_count = 0;
        chain->cooked_count = 0;
}
EXPORT_SYMBOL(stmp37xx_circ_clear_chain);

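/*
 * stmp37xx_circ_advance_free - move @count descriptors from the "cooked"
 * (completed) state back to the "free" state once their data has been
 * consumed.
 */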
void stmp37xx_circ_advance_free(struct stmp37xx_circ_dma_chain *chain,
                                unsigned count)
{
        BUG_ON(chain->cooked_count < count);

        chain->cooked_count -= count;
        chain->cooked_index += count;
        chain->cooked_index %= chain->total_count;
        chain->free_count += count;
}
EXPORT_SYMBOL(stmp37xx_circ_advance_free);

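/*
 * stmp37xx_circ_advance_active - hand @count free descriptors to the
 * hardware by bumping the channel's semaphore, moving them from the "free"
 * to the "active" state.
 */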
void stmp37xx_circ_advance_active(struct stmp37xx_circ_dma_chain *chain,
                                  unsigned count)
{
        void __iomem *c;
        u32 mask_clr, mask;

        BUG_ON(chain->free_count < count);

        chain->free_count -= count;
        chain->free_index += count;
        chain->free_index %= chain->total_count;
        chain->active_count += count;

        switch (chain->bus) {
        case STMP3XXX_BUS_APBH:
                c = REGS_APBH_BASE + HW_APBH_CHn_SEMA + 0x70 * chain->channel;
                mask_clr = BM_APBH_CHn_SEMA_INCREMENT_SEMA;
                mask = BF(count, APBH_CHn_SEMA_INCREMENT_SEMA);
                break;
        case STMP3XXX_BUS_APBX:
                c = REGS_APBX_BASE + HW_APBX_CHn_SEMA + 0x70 * chain->channel;
                mask_clr = BM_APBX_CHn_SEMA_INCREMENT_SEMA;
                mask = BF(count, APBX_CHn_SEMA_INCREMENT_SEMA);
                break;
        default:
                BUG();
                return;
        }

        /* Set counting semaphore (kicks off transfer).  Assumes
           peripheral has been set up correctly */
        stmp3xxx_clearl(mask_clr, c);
        stmp3xxx_setl(mask, c);
}
EXPORT_SYMBOL(stmp37xx_circ_advance_active);

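/*
 * stmp37xx_circ_advance_cooked - move descriptors the hardware has finished
 * with from the "active" to the "cooked" state, based on the difference
 * between the active count and the hardware semaphore.  Returns the number
 * of descriptors that became cooked.
 */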
unsigned stmp37xx_circ_advance_cooked(struct stmp37xx_circ_dma_chain *chain)
{
        unsigned cooked;

        cooked = chain->active_count -
                stmp3xxx_dma_read_semaphore(STMP3XXX_DMA(chain->channel,
                                                         chain->bus));

        chain->active_count -= cooked;
        chain->active_index += cooked;
        chain->active_index %= chain->total_count;

        chain->cooked_count += cooked;

        return cooked;
}
EXPORT_SYMBOL(stmp37xx_circ_advance_cooked);

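/*
 * stmp3xxx_dma_set_alt_target - select which peripheral @function the
 * channel is routed to via the bus's DEVSEL register.  The field width
 * differs between STMP37XX (4 bits) and STMP378X (2 bits).
 */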
void stmp3xxx_dma_set_alt_target(int channel, int function)
{
#if defined(CONFIG_ARCH_STMP37XX)
        unsigned bits = 4;
#elif defined(CONFIG_ARCH_STMP378X)
        unsigned bits = 2;
#else
#error wrong arch
#endif
        int shift = STMP3XXX_DMA_CHANNEL(channel) * bits;
        unsigned mask = (1 << bits) - 1;
        void __iomem *c;

        BUG_ON(function < 0 || function >= (1 << bits));
        pr_debug("%s: channel = %d, using mask %x, shift = %d\n",
                 __func__, channel, mask, shift);

        switch (STMP3XXX_DMA_BUS(channel)) {
        case STMP3XXX_BUS_APBH:
                c = REGS_APBH_BASE + HW_APBH_DEVSEL;
                break;
        case STMP3XXX_BUS_APBX:
                c = REGS_APBX_BASE + HW_APBX_DEVSEL;
                break;
        default:
                BUG();
        }
        /* Clear the channel's DEVSEL field, then write the new function */
        stmp3xxx_clearl(mask << shift, c);
        stmp3xxx_setl(function << shift, c);
}
EXPORT_SYMBOL(stmp3xxx_dma_set_alt_target);

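/*
 * stmp3xxx_dma_suspend/stmp3xxx_dma_resume - gate and ungate the clocks of
 * both APB DMA bridges; resume also clears the soft-reset bits so the
 * blocks come back out of reset.
 */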
void stmp3xxx_dma_suspend(void)
{
        stmp3xxx_setl(BM_APBH_CTRL0_CLKGATE, REGS_APBH_BASE + HW_APBH_CTRL0);
        stmp3xxx_setl(BM_APBX_CTRL0_CLKGATE, REGS_APBX_BASE + HW_APBX_CTRL0);
}

void stmp3xxx_dma_resume(void)
{
        stmp3xxx_clearl(BM_APBH_CTRL0_CLKGATE | BM_APBH_CTRL0_SFTRST,
                        REGS_APBH_BASE + HW_APBH_CTRL0);
        stmp3xxx_clearl(BM_APBX_CTRL0_CLKGATE | BM_APBX_CTRL0_SFTRST,
                        REGS_APBX_BASE + HW_APBX_CTRL0);
}

#ifdef CONFIG_CPU_FREQ

struct dma_notifier_block {
        struct notifier_block nb;
        void *data;
};

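/*
 * Gate the DMA clocks around CPU frequency transitions: suspend on
 * CPUFREQ_PRECHANGE, resume on CPUFREQ_POSTCHANGE.
 */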
static int dma_cpufreq_notifier(struct notifier_block *self,
                                unsigned long phase, void *p)
{
        switch (phase) {
        case CPUFREQ_POSTCHANGE:
                stmp3xxx_dma_resume();
                break;

        case CPUFREQ_PRECHANGE:
                stmp3xxx_dma_suspend();
                break;

        default:
                break;
        }

        return NOTIFY_DONE;
}

static struct dma_notifier_block dma_cpufreq_nb = {
        .nb = {
                .notifier_call = dma_cpufreq_notifier,
        },
};
#endif /* CONFIG_CPU_FREQ */

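/*
 * stmp3xxx_dma_init - bring both APB DMA bridges out of clock-gating and
 * soft reset at boot and, when CPU_FREQ is enabled, register the cpufreq
 * transition notifier above.
 */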
void __init stmp3xxx_dma_init(void)
{
        stmp3xxx_clearl(BM_APBH_CTRL0_CLKGATE | BM_APBH_CTRL0_SFTRST,
                        REGS_APBH_BASE + HW_APBH_CTRL0);
        stmp3xxx_clearl(BM_APBX_CTRL0_CLKGATE | BM_APBX_CTRL0_SFTRST,
                        REGS_APBX_BASE + HW_APBX_CTRL0);
#ifdef CONFIG_CPU_FREQ
        cpufreq_register_notifier(&dma_cpufreq_nb.nb,
                                  CPUFREQ_TRANSITION_NOTIFIER);
#endif /* CONFIG_CPU_FREQ */
}