/*
 * arch/arm/kernel/dma-sa1100.c
 *
 * Support functions for the SA11x0 internal DMA channels.
 * (see also Documentation/arm/SA1100/DMA)
 *
 * Copyright (C) 2000 Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/mach/dma.h>


#undef DEBUG
#ifdef DEBUG
#define DPRINTK( s, arg... )  printk( "dma<%s>: " s, dma->device_id , ##arg )
#else
#define DPRINTK( x... )
#endif


/*
 * DMA control register structure
 */
typedef struct {
        volatile u_long DDAR;
        volatile u_long SetDCSR;
        volatile u_long ClrDCSR;
        volatile u_long RdDCSR;
        volatile dma_addr_t DBSA;
        volatile u_long DBTA;
        volatile dma_addr_t DBSB;
        volatile u_long DBTB;
} dma_regs_t;
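
/*
 * Each channel provides two descriptor sets so transfers can ping-pong
 * between them: DBSA/DBTA hold the start address and transfer count for
 * buffer A, DBSB/DBTB the same for buffer B.  DDAR selects the target
 * device, and DCSR (accessed through its Set/Clr/Rd views) holds the
 * control/status bits (DCSR_STRTA/B, DCSR_DONEA/B, DCSR_BIU, ...) used
 * throughout this file.
 */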

#include "dma.h"

sa1100_dma_t dma_chan[MAX_SA1100_DMA_CHANNELS];

/*
 * Maximum physical DMA buffer size
 */
#define MAX_DMA_SIZE            0x1fff
#define MAX_DMA_ORDER           12
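
/*
 * Buffers larger than MAX_DMA_SIZE are fed to the hardware by
 * process_dma() in chunks of (1 << MAX_DMA_ORDER) bytes.
 */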


/*
 * DMA processing...
 */

static inline int start_sa1100_dma(sa1100_dma_t * dma, dma_addr_t dma_ptr, int size)
{
        dma_regs_t *regs = dma->regs;
        int status;

        status = regs->RdDCSR;

        /* If both DMA buffers are started, there's nothing else we can do. */
        if ((status & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB)) {
                DPRINTK("start: st %#x busy\n", status);
                return -EBUSY;
        }

        if (((status & DCSR_BIU) && (status & DCSR_STRTB)) ||
            (!(status & DCSR_BIU) && !(status & DCSR_STRTA))) {
                if (status & DCSR_DONEA) {
                        /* give a chance for the interrupt to be processed */
                        goto irq_pending;
                }
                regs->DBSA = dma_ptr;
                regs->DBTA = size;
                regs->SetDCSR = DCSR_STRTA | DCSR_IE | DCSR_RUN;
                DPRINTK("start a=%#x s=%d on A\n", dma_ptr, size);
        } else {
                if (status & DCSR_DONEB) {
                        /* give a chance for the interrupt to be processed */
                        goto irq_pending;
                }
                regs->DBSB = dma_ptr;
                regs->DBTB = size;
                regs->SetDCSR = DCSR_STRTB | DCSR_IE | DCSR_RUN;
                DPRINTK("start a=%#x s=%d on B\n", dma_ptr, size);
        }

        return 0;

irq_pending:
        return -EAGAIN;
}


static int start_dma(sa1100_dma_t *dma, dma_addr_t dma_ptr, int size)
{
        if (channel_is_sa1111_sac(dma - dma_chan))
                return start_sa1111_sac_dma(dma, dma_ptr, size);
        return start_sa1100_dma(dma, dma_ptr, size);
}


/* This must be called with IRQ disabled */
static void process_dma(sa1100_dma_t * dma)
{
        dma_buf_t *buf;
        int chunksize;

        for (;;) {
                buf = dma->tail;

                if (!buf || dma->stopped) {
                        /* no more data available */
                        DPRINTK("process: no more buf (dma %s)\n",
                                dma->curr ? "active" : "inactive");
                        /*
                         * Some devices may require DMA to keep sending data
                         * at all times, e.g. as a clock reference.
                         * Note: if a data buffer is still being processed,
                         * the spin reference count is negative.  This allows
                         * the DMA termination to be accounted for in the
                         * proper order.
                         */
                        if (dma->spin_size && dma->spin_ref >= 0) {
                                chunksize = dma->spin_size;
                                if (chunksize > MAX_DMA_SIZE)
                                        chunksize = (1 << MAX_DMA_ORDER);
                                while (start_dma(dma, dma->spin_addr, chunksize) == 0)
                                        dma->spin_ref++;
                                if (dma->curr != NULL)
                                        dma->spin_ref = -dma->spin_ref;
                        }
                        break;
                }

                /*
                 * This improves latency if there are some active spinning
                 * buffers.  We kill them altogether.
                 */
                if (dma->spin_ref > 0) {
                        if (channel_is_sa1111_sac(dma - dma_chan))
                                sa1111_reset_sac_dma(dma - dma_chan);
                        else
                                dma->regs->ClrDCSR =
                                        DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB;
                        dma->spin_ref = 0;
                }

                /*
                 * Let's try to start DMA on the current buffer.
                 * If DMA is busy then we break here.
                 */
                chunksize = buf->size;
                if (chunksize > MAX_DMA_SIZE)
                        chunksize = (1 << MAX_DMA_ORDER);
                DPRINTK("process: b=%#x s=%d\n", (int) buf->id, buf->size);
                if (start_dma(dma, buf->dma_ptr, chunksize) != 0)
                        break;
                if (!dma->curr)
                        dma->curr = buf;
                buf->ref++;
                buf->dma_ptr += chunksize;
                buf->size -= chunksize;
                if (buf->size == 0) {
                        /* current buffer is done: move tail to the next one */
                        dma->tail = buf->next;
                        DPRINTK("process: next b=%#x\n", (int) dma->tail);
                }
        }
}


/* This must be called with IRQ disabled */
void sa1100_dma_done (sa1100_dma_t *dma)
{
        dma_buf_t *buf = dma->curr;

        if (dma->spin_ref > 0) {
                dma->spin_ref--;
        } else if (buf) {
                buf->ref--;
                if (buf->ref == 0 && buf->size == 0) {
                        /*
                         * Current buffer is done.
                         * Move current reference to the next one and send
                         * the processed buffer to the callback function,
                         * then discard it.
                         */
                        DPRINTK("IRQ: buf done\n");
                        dma->curr = buf->next;
                        dma->spin_ref = -dma->spin_ref;
                        if (dma->head == buf)
                                dma->head = NULL;
                        if (dma->callback) {
                                int size = buf->dma_ptr - buf->dma_start;
                                dma->callback(buf->id, size);
                        }
                        kfree(buf);
                }
        }

        process_dma(dma);
}


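/*
 * Per-channel interrupt handler: report and clear any error condition,
 * acknowledge the DONE bits, and account one completed transfer per
 * DONE flag through sa1100_dma_done().
 */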
static void dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        sa1100_dma_t *dma = (sa1100_dma_t *) dev_id;
        int status = dma->regs->RdDCSR;

        DPRINTK("IRQ: b=%#x st=%#x\n", (int) dma->curr->id, status);

        if (status & (DCSR_ERROR)) {
                printk(KERN_ERR "DMA on \"%s\" caused an error\n", dma->device_id);
                dma->regs->ClrDCSR = DCSR_ERROR;
        }

        dma->regs->ClrDCSR = status & (DCSR_DONEA | DCSR_DONEB);
        if (status & DCSR_DONEA)
                sa1100_dma_done (dma);
        if (status & DCSR_DONEB)
                sa1100_dma_done (dma);
}


/*
 * DMA interface functions
 */

static spinlock_t dma_list_lock;

int sa1100_request_dma (dmach_t * channel, const char *device_id,
                        dma_device_t device)
{
        sa1100_dma_t *dma = NULL;
        dma_regs_t *regs;
        int i, err;

        *channel = -1;  /* to be sure we catch the freeing of a misregistered channel */

        err = 0;
        spin_lock(&dma_list_lock);
        for (i = 0; i < SA1100_DMA_CHANNELS; i++) {
                if (dma_chan[i].in_use) {
                        if (dma_chan[i].device == device) {
                                err = -EBUSY;
                                break;
                        }
                } else if (!dma) {
                        dma = &dma_chan[i];
                }
        }
        if (!err) {
                if (dma)
                        dma->in_use = 1;
                else
                        err = -ENOSR;
        }
        spin_unlock(&dma_list_lock);
        if (err)
                return err;

        err = request_irq(dma->irq, dma_irq_handler, SA_INTERRUPT,
                          device_id, (void *) dma);
        if (err) {
                printk(KERN_ERR
                       "%s: unable to request IRQ %d for DMA channel\n",
                       device_id, dma->irq);
                return err;
        }

        *channel = dma - dma_chan;
        dma->device_id = device_id;
        dma->device = device;
        dma->callback = NULL;
        dma->spin_size = 0;

        regs = dma->regs;
        regs->ClrDCSR =
            (DCSR_DONEA | DCSR_DONEB | DCSR_STRTA | DCSR_STRTB |
             DCSR_IE | DCSR_ERROR | DCSR_RUN);
        regs->DDAR = device;
        DPRINTK("requested\n");
        return 0;
}


int sa1100_dma_set_callback(dmach_t channel, dma_callback_t cb)
{
        sa1100_dma_t *dma = &dma_chan[channel];

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        dma->callback = cb;
        DPRINTK("cb = %p\n", cb);
        return 0;
}


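/*
 * Set (or clear, when size == 0) the "spin" buffer of a channel: whenever
 * no queued buffer is available, process_dma() keeps restarting DMA on
 * this buffer so the device keeps receiving data (see the comment in
 * process_dma() above).
 */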
int sa1100_dma_set_spin(dmach_t channel, dma_addr_t addr, int size)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        int flags;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        DPRINTK("set spin %d at %#x\n", size, addr);
        local_irq_save(flags);
        dma->spin_addr = addr;
        dma->spin_size = size;
        if (size)
                process_dma(dma);
        local_irq_restore(flags);
        return 0;
}


int sa1100_dma_queue_buffer(dmach_t channel, void *buf_id,
                            dma_addr_t data, int size)
{
        sa1100_dma_t *dma;
        dma_buf_t *buf;
        int flags;

        dma = &dma_chan[channel];
        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;

        buf->next = NULL;
        buf->ref = 0;
        buf->dma_ptr = buf->dma_start = data;
        buf->size = size;
        buf->id = buf_id;
        DPRINTK("queueing b=%#x a=%#x s=%d\n", (int) buf_id, data, size);

        local_irq_save(flags);
        if (dma->head)
                dma->head->next = buf;
        dma->head = buf;
        if (!dma->tail)
                dma->tail = buf;
        process_dma(dma);
        local_irq_restore(flags);

        return 0;
}


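/*
 * Report the buffer id and the bus address the channel is currently
 * transferring at, as sampled from the active descriptor set.
 */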
int sa1100_dma_get_current(dmach_t channel, void **buf_id, dma_addr_t *addr)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        dma_regs_t *regs;
        int flags, ret;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        if (channel_is_sa1111_sac(channel))
                return sa1111_dma_get_current(channel, buf_id, addr);

        regs = dma->regs;
        local_irq_save(flags);
        if (dma->curr && dma->spin_ref <= 0) {
                dma_buf_t *buf = dma->curr;
                int status, using_bufa;

                status = regs->RdDCSR;
                /*
                 * If we got here, that's because there is, or recently was, a
                 * buffer being processed.  We must determine whether buffer
                 * A or B is active.  Two possibilities: either we are in the
                 * middle of a buffer, or the DMA controller just switched to
                 * the next toggle but the interrupt hasn't been serviced yet.
                 * The former case is straightforward.  In the latter case, we
                 * behave as if DMA were still at the end of the previous
                 * toggle, since the registers haven't been reset yet.  This
                 * works around the edge case, and since we're always a little
                 * behind anyway it shouldn't make a big difference.  If DMA
                 * has been stopped prior to calling this then the position is
                 * always exact.
                 */
                using_bufa = ((!(status & DCSR_BIU) && (status & DCSR_STRTA)) ||
                              ( (status & DCSR_BIU) && !(status & DCSR_STRTB)));
                if (buf_id)
                        *buf_id = buf->id;
                *addr = (using_bufa) ? regs->DBSA : regs->DBSB;
                /*
                 * Clamp funky pointers sometimes returned by the hardware
                 * on completed DMA transfers
                 */
                if (*addr < buf->dma_start ||
                    *addr > buf->dma_ptr)
                        *addr = buf->dma_ptr;
                DPRINTK("curr_pos: b=%#x a=%#x\n", (int)dma->curr->id, *addr);
                ret = 0;
        } else if (dma->tail && dma->stopped) {
                dma_buf_t *buf = dma->tail;
                if (buf_id)
                        *buf_id = buf->id;
                *addr = buf->dma_ptr;
                ret = 0;
        } else {
                if (buf_id)
                        *buf_id = NULL;
                *addr = 0;
                ret = -ENXIO;
        }
        local_irq_restore(flags);
        return ret;
}


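/*
 * Stop the channel, record how far the current buffer actually got, and
 * rewind the queue so a later sa1100_dma_resume() restarts from that
 * exact position.
 */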
int sa1100_dma_stop(dmach_t channel)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        int flags;

        if (channel_is_sa1111_sac(channel))
                return sa1111_dma_stop(channel);

        if (dma->stopped)
                return 0;
        local_irq_save(flags);
        dma->stopped = 1;
        /*
         * Stop DMA and tweak state variables so everything can restart
         * from where it stopped when resume/wakeup occurs.
         */
        dma->regs->ClrDCSR = DCSR_RUN | DCSR_IE;
        if (dma->curr) {
                dma_buf_t *buf = dma->curr;
                if (dma->spin_ref <= 0) {
                        dma_addr_t curpos;
                        sa1100_dma_get_current(channel, NULL, &curpos);
                        buf->size += buf->dma_ptr - curpos;
                        buf->dma_ptr = curpos;
                }
                buf->ref = 0;
                dma->tail = buf;
                dma->curr = NULL;
        }
        dma->spin_ref = 0;
        dma->regs->ClrDCSR = DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB;
        process_dma(dma);
        local_irq_restore(flags);
        return 0;
}


int sa1100_dma_resume(dmach_t channel)
{
        sa1100_dma_t *dma = &dma_chan[channel];

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        if (channel_is_sa1111_sac(channel))
                return sa1111_dma_resume(channel);

        if (dma->stopped) {
                unsigned long flags;
                local_irq_save(flags);
                dma->stopped = 0;
                process_dma(dma);
                local_irq_restore(flags);
        }
        return 0;
}


int sa1100_dma_flush_all(dmach_t channel)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        dma_buf_t *buf, *next_buf;
        int flags;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        local_irq_save(flags);
        if (channel_is_sa1111_sac(channel))
                sa1111_reset_sac_dma(channel);
        else
                dma->regs->ClrDCSR = DCSR_STRTA|DCSR_STRTB|DCSR_DONEA|DCSR_DONEB|DCSR_RUN|DCSR_IE;
        buf = dma->curr;
        if (!buf)
                buf = dma->tail;
        dma->head = dma->tail = dma->curr = NULL;
        dma->stopped = 0;
        dma->spin_ref = 0;
        process_dma(dma);
        local_irq_restore(flags);
        while (buf) {
                next_buf = buf->next;
                kfree(buf);
                buf = next_buf;
        }
        DPRINTK("flushed\n");
        return 0;
}


void sa1100_free_dma(dmach_t channel)
{
        sa1100_dma_t *dma;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS)
                return;

        dma = &dma_chan[channel];
        if (!dma->in_use) {
                printk(KERN_ERR "Trying to free free DMA%d\n", channel);
                return;
        }

        sa1100_dma_set_spin(channel, 0, 0);
        sa1100_dma_flush_all(channel);

        if (channel_is_sa1111_sac(channel)) {
                sa1111_cleanup_sac_dma(channel);
        } else {
                free_irq(IRQ_DMA0 + channel, (void *) dma);
        }
        dma->in_use = 0;

        DPRINTK("freed\n");
}


EXPORT_SYMBOL(sa1100_request_dma);
EXPORT_SYMBOL(sa1100_dma_set_callback);
EXPORT_SYMBOL(sa1100_dma_set_spin);
EXPORT_SYMBOL(sa1100_dma_queue_buffer);
EXPORT_SYMBOL(sa1100_dma_get_current);
EXPORT_SYMBOL(sa1100_dma_stop);
EXPORT_SYMBOL(sa1100_dma_resume);
EXPORT_SYMBOL(sa1100_dma_flush_all);
EXPORT_SYMBOL(sa1100_free_dma);

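/*
 * Typical use of the interface exported above, sketched for a hypothetical
 * client driver.  The device constant, buffer variables and callback below
 * are placeholders, not names defined in this file; the callback prototype
 * follows the way dma->callback is invoked in sa1100_dma_done():
 *
 *      static void my_dma_done(void *buf_id, int size)
 *      {
 *              ... recycle buf_id; size is the number of bytes transferred ...
 *      }
 *
 *      dmach_t ch;
 *      int err;
 *
 *      err = sa1100_request_dma(&ch, "my device", MY_DMA_DEVICE);
 *      if (err)
 *              return err;
 *      sa1100_dma_set_callback(ch, my_dma_done);
 *      sa1100_dma_queue_buffer(ch, my_buf, my_buf_dma_addr, my_buf_size);
 *      ...
 *      sa1100_free_dma(ch);
 */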

#ifdef CONFIG_PM
/* Drivers should call this from their PM callback function */

int sa1100_dma_sleep(dmach_t channel)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        int orig_state;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        if (channel_is_sa1111_sac(channel)) {
                /* We'll cheat a little until someone actually
                 * writes the real thing.
                 */
                sa1111_reset_sac_dma(channel);
                return 0;
        }

        orig_state = dma->stopped;
        sa1100_dma_stop(channel);
        dma->regs->ClrDCSR = DCSR_RUN | DCSR_IE | DCSR_STRTA | DCSR_STRTB;
        dma->stopped = orig_state;
        dma->spin_ref = 0;
        return 0;
}

int sa1100_dma_wakeup(dmach_t channel)
{
        sa1100_dma_t *dma = &dma_chan[channel];
        dma_regs_t *regs;
        int flags;

        if ((unsigned)channel >= MAX_SA1100_DMA_CHANNELS || !dma->in_use)
                return -EINVAL;

        if (channel_is_sa1111_sac(channel)) {
                /* We'll cheat a little until someone actually
                 * writes the real thing.
                 */
                return 0;
        }

        regs = dma->regs;
        regs->ClrDCSR =
            (DCSR_DONEA | DCSR_DONEB | DCSR_STRTA | DCSR_STRTB |
             DCSR_IE | DCSR_ERROR | DCSR_RUN);
        regs->DDAR = dma->device;
        local_irq_save(flags);
        process_dma(dma);
        local_irq_restore(flags);
        return 0;
}

EXPORT_SYMBOL(sa1100_dma_sleep);
EXPORT_SYMBOL(sa1100_dma_wakeup);

#endif


static int __init sa1100_init_dma(void)
{
        int channel;
        for (channel = 0; channel < SA1100_DMA_CHANNELS; channel++) {
                dma_chan[channel].regs =
                    (dma_regs_t *) &DDAR(channel);
                dma_chan[channel].irq = IRQ_DMA0 + channel;
        }
        return 0;
}

__initcall(sa1100_init_dma);