/*
 * arch/arm/mach-ep93xx/dma-m2p.c
 * M2P DMA handling for Cirrus EP93xx chips.
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 * Copyright (C) 2006 Applied Data Systems
 *
 * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

/*
 * On the EP93xx chip the following peripherals may be allocated to the 10
 * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
 *
 * I2S contains 3 Tx and 3 Rx DMA Channels
 * AAC contains 3 Tx and 3 Rx DMA Channels
 * UART1 contains 1 Tx and 1 Rx DMA Channel
 * UART2 contains 1 Tx and 1 Rx DMA Channel
 * UART3 contains 1 Tx and 1 Rx DMA Channel
 * IrDA contains 1 Tx and 1 Rx DMA Channel
 *
 * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
 * by this implementation.
 */
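
/*
 * Client sketch (illustrative only): a client encodes its peripheral
 * port and transfer direction in the flags field before registering.
 * EP93XX_DMA_M2P_PORT_UART1 is assumed to be one of the port constants
 * from <mach/dma.h>, and the callbacks are hypothetical placeholders.
 *
 *	static struct ep93xx_dma_m2p_client uart1_rx_client = {
 *		.name		 = "uart1-rx",
 *		.flags		 = EP93XX_DMA_M2P_PORT_UART1 |
 *				   EP93XX_DMA_M2P_RX,
 *		.cookie		 = &uart1_state,
 *		.buffer_started	 = uart1_rx_started,
 *		.buffer_finished = uart1_rx_finished,
 *	};
 */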

#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>

#include <mach/dma.h>
#include <mach/hardware.h>

#define M2P_CONTROL			0x00
#define M2P_CONTROL_STALL_IRQ_EN	(1 << 0)
#define M2P_CONTROL_NFB_IRQ_EN		(1 << 1)
#define M2P_CONTROL_ERROR_IRQ_EN	(1 << 3)
#define M2P_CONTROL_ENABLE		(1 << 4)
#define M2P_INTERRUPT			0x04
#define M2P_INTERRUPT_STALL		(1 << 0)
#define M2P_INTERRUPT_NFB		(1 << 1)
#define M2P_INTERRUPT_ERROR		(1 << 3)
#define M2P_PPALLOC			0x08
#define M2P_STATUS			0x0c
#define M2P_REMAIN			0x14
#define M2P_MAXCNT0			0x20
#define M2P_BASE0			0x24
#define M2P_MAXCNT1			0x30
#define M2P_BASE1			0x34

#define STATE_IDLE	0	/* Channel is inactive. */
#define STATE_STALL	1	/* Channel is active, no buffers pending. */
#define STATE_ON	2	/* Channel is active, one buffer pending. */
#define STATE_NEXT	3	/* Channel is active, two buffers pending. */
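
/*
 * The state values above correspond to the two-bit current-state field
 * of M2P_STATUS (bits 5:4), as extracted by m2p_channel_state() below.
 */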

struct m2p_channel {
	char				*name;
	void __iomem			*base;
	int				irq;

	struct clk			*clk;
	spinlock_t			lock;

	void				*client;
	unsigned			next_slot:1;
	struct ep93xx_dma_buffer	*buffer_xfer;
	struct ep93xx_dma_buffer	*buffer_next;
	struct list_head		buffers_pending;
};

static struct m2p_channel m2p_rx[] = {
	{"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
	{"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
	{"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
	{"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
	{"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
	{NULL},
};

static struct m2p_channel m2p_tx[] = {
	{"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
	{"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
	{"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
	{"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
	{"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
	{NULL},
};

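/*
 * Each channel has two hardware buffer-descriptor slots (MAXCNT0/BASE0
 * and MAXCNT1/BASE1). The hardware alternates between them; next_slot
 * tracks which slot the next buffer must be programmed into.
 */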
static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
{
	if (ch->next_slot == 0) {
		writel(buf->size, ch->base + M2P_MAXCNT0);
		writel(buf->bus_addr, ch->base + M2P_BASE0);
	} else {
		writel(buf->size, ch->base + M2P_MAXCNT1);
		writel(buf->bus_addr, ch->base + M2P_BASE1);
	}
	ch->next_slot ^= 1;
}

static void choose_buffer_xfer(struct m2p_channel *ch)
{
	struct ep93xx_dma_buffer *buf;

	ch->buffer_xfer = NULL;
	if (!list_empty(&ch->buffers_pending)) {
		buf = list_entry(ch->buffers_pending.next,
				 struct ep93xx_dma_buffer, list);
		list_del(&buf->list);
		feed_buf(ch, buf);
		ch->buffer_xfer = buf;
	}
}

static void choose_buffer_next(struct m2p_channel *ch)
{
	struct ep93xx_dma_buffer *buf;

	ch->buffer_next = NULL;
	if (!list_empty(&ch->buffers_pending)) {
		buf = list_entry(ch->buffers_pending.next,
				 struct ep93xx_dma_buffer, list);
		list_del(&buf->list);
		feed_buf(ch, buf);
		ch->buffer_next = buf;
	}
}

static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
{
	/*
	 * The control register must be read immediately after being written so
	 * that the internal state machine is correctly updated. See the ep93xx
	 * users' guide for details.
	 */
	writel(v, ch->base + M2P_CONTROL);
	readl(ch->base + M2P_CONTROL);
}

static inline int m2p_channel_state(struct m2p_channel *ch)
{
	return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
}

static irqreturn_t m2p_irq(int irq, void *dev_id)
{
	struct m2p_channel *ch = dev_id;
	struct ep93xx_dma_m2p_client *cl;
	u32 irq_status, v;
	int error = 0;

	cl = ch->client;

	spin_lock(&ch->lock);
	irq_status = readl(ch->base + M2P_INTERRUPT);

	if (irq_status & M2P_INTERRUPT_ERROR) {
		writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
		error = 1;
	}

	if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
		spin_unlock(&ch->lock);
		return IRQ_NONE;
	}

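	/*
	 * A stall or next-frame-buffer interrupt is pending. The channel
	 * state tells us how far the hardware got: STATE_STALL means both
	 * the current and any queued next buffer have completed, while
	 * STATE_ON means only the current buffer finished and the next
	 * one has been promoted.
	 */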
	switch (m2p_channel_state(ch)) {
	case STATE_IDLE:
		pr_crit("dma interrupt without a dma buffer\n");
		BUG();
		break;

	case STATE_STALL:
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		if (ch->buffer_next != NULL) {
			cl->buffer_finished(cl->cookie, ch->buffer_next,
					    0, error);
		}
		choose_buffer_xfer(ch);
		choose_buffer_next(ch);
		if (ch->buffer_xfer != NULL)
			cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_ON:
		cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
		ch->buffer_xfer = ch->buffer_next;
		choose_buffer_next(ch);
		cl->buffer_started(cl->cookie, ch->buffer_xfer);
		break;

	case STATE_NEXT:
		pr_crit("dma interrupt while next\n");
		BUG();
		break;
	}

	v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
					      M2P_CONTROL_NFB_IRQ_EN);
	if (ch->buffer_xfer != NULL)
		v |= M2P_CONTROL_STALL_IRQ_EN;
	if (ch->buffer_next != NULL)
		v |= M2P_CONTROL_NFB_IRQ_EN;
	m2p_set_control(ch, v);

	spin_unlock(&ch->lock);
	return IRQ_HANDLED;
}

static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch;
	int i;

	if (cl->flags & EP93XX_DMA_M2P_RX)
		ch = m2p_rx;
	else
		ch = m2p_tx;

	for (i = 0; ch[i].base; i++) {
		struct ep93xx_dma_m2p_client *client;

		client = ch[i].client;
		if (client != NULL) {
			int port;

			port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
			if (port == (client->flags &
				     EP93XX_DMA_M2P_PORT_MASK)) {
				pr_warning("DMA channel already used by %s\n",
					   cl->name ? : "unknown client");
				return ERR_PTR(-EBUSY);
			}
		}
	}

	for (i = 0; ch[i].base; i++) {
		if (ch[i].client == NULL)
			return ch + i;
	}

	pr_warning("No free DMA channel for %s\n",
		   cl->name ? : "unknown client");
	return ERR_PTR(-ENODEV);
}

static void channel_enable(struct m2p_channel *ch)
{
	struct ep93xx_dma_m2p_client *cl = ch->client;
	u32 v;

	clk_enable(ch->clk);

	v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
	writel(v, ch->base + M2P_PPALLOC);

	v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
	v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
	m2p_set_control(ch, v);
}

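/*
 * Disabling a channel is a two-stage drain: first mask the buffer
 * interrupts and busy-wait until any in-flight buffers complete (the
 * state drops below STATE_ON), then clear the control register and
 * wait for the channel to leave STATE_STALL before gating its clock.
 */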
static void channel_disable(struct m2p_channel *ch)
{
	u32 v;

	v = readl(ch->base + M2P_CONTROL);
	v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
	m2p_set_control(ch, v);

	while (m2p_channel_state(ch) >= STATE_ON)
		cpu_relax();

	m2p_set_control(ch, 0x0);

	while (m2p_channel_state(ch) == STATE_STALL)
		cpu_relax();

	clk_disable(ch->clk);
}

int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch;
	int err;

	ch = find_free_channel(cl);
	if (IS_ERR(ch))
		return PTR_ERR(ch);

	err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
	if (err)
		return err;

	ch->client = cl;
	ch->next_slot = 0;
	ch->buffer_xfer = NULL;
	ch->buffer_next = NULL;
	INIT_LIST_HEAD(&ch->buffers_pending);

	cl->channel = ch;

	channel_enable(ch);

	return 0;
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
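
/*
 * Usage sketch (illustrative only; my_client, my_buf and my_dma_handle
 * are hypothetical): once registered, a client queues buffers against
 * its channel. bus_addr and size must describe a DMA-able region, e.g.
 * one obtained from dma_alloc_coherent().
 *
 *	my_buf.bus_addr = my_dma_handle;
 *	my_buf.size = my_len;
 *	ep93xx_dma_m2p_submit(&my_client, &my_buf);
 *
 * The buffer is handed back through my_client.buffer_finished(). To
 * queue a follow-up buffer from inside that callback, use
 * ep93xx_dma_m2p_submit_recursive() instead of ep93xx_dma_m2p_submit().
 */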

void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	free_irq(ch->irq, ch);
	ch->client = NULL;
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);

void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
			   struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(&ch->lock, flags);
	v = readl(ch->base + M2P_CONTROL);
	if (ch->buffer_xfer == NULL) {
		ch->buffer_xfer = buf;
		feed_buf(ch, buf);
		cl->buffer_started(cl->cookie, buf);

		v |= M2P_CONTROL_STALL_IRQ_EN;
		m2p_set_control(ch, v);

	} else if (ch->buffer_next == NULL) {
		ch->buffer_next = buf;
		feed_buf(ch, buf);

		v |= M2P_CONTROL_NFB_IRQ_EN;
		m2p_set_control(ch, v);
	} else {
		list_add_tail(&buf->list, &ch->buffers_pending);
	}
	spin_unlock_irqrestore(&ch->lock, flags);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);

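/*
 * Unlike ep93xx_dma_m2p_submit(), this variant touches the pending list
 * without taking ch->lock, so it is only safe where that lock is already
 * held, i.e. from within a client's buffer_started()/buffer_finished()
 * callback, which m2p_irq() invokes with the channel lock held.
 */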
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
				     struct ep93xx_dma_buffer *buf)
{
	struct m2p_channel *ch = cl->channel;

	list_add_tail(&buf->list, &ch->buffers_pending);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);

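/*
 * Note that any buffers still queued on the channel are dropped without
 * a buffer_finished() callback; the channel comes back up empty, as if
 * freshly registered.
 */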
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
{
	struct m2p_channel *ch = cl->channel;

	channel_disable(ch);
	ch->next_slot = 0;
	ch->buffer_xfer = NULL;
	ch->buffer_next = NULL;
	INIT_LIST_HEAD(&ch->buffers_pending);
	channel_enable(ch);
}
EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);

static int init_channel(struct m2p_channel *ch)
{
	ch->clk = clk_get(NULL, ch->name);
	if (IS_ERR(ch->clk))
		return PTR_ERR(ch->clk);

	spin_lock_init(&ch->lock);
	ch->client = NULL;

	return 0;
}

static int __init ep93xx_dma_m2p_init(void)
{
	int i;
	int ret;

	for (i = 0; m2p_rx[i].base; i++) {
		ret = init_channel(m2p_rx + i);
		if (ret)
			return ret;
	}

	for (i = 0; m2p_tx[i].base; i++) {
		ret = init_channel(m2p_tx + i);
		if (ret)
			return ret;
	}

	pr_info("M2P DMA subsystem initialized\n");
	return 0;
}
arch_initcall(ep93xx_dma_m2p_init);