1 /*
2  * BRIEF MODULE DESCRIPTION
3  *	Defines for using and allocating dma channels on the Alchemy
4  *      Au1000 mips processor.
5  *
6  * Copyright 2000 MontaVista Software Inc.
7  * Author: MontaVista Software, Inc.
8  *         	stevel@mvista.com or source@mvista.com
9  *
10  *  This program is free software; you can redistribute  it and/or modify it
11  *  under  the terms of  the GNU General  Public License as published by the
12  *  Free Software Foundation;  either version 2 of the  License, or (at your
13  *  option) any later version.
14  *
15  *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
16  *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
17  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
18  *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
19  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
21  *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
22  *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
23  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  *  You should have received a copy of the  GNU General Public License along
27  *  with this program; if not, write  to the Free Software Foundation, Inc.,
28  *  675 Mass Ave, Cambridge, MA 02139, USA.
29  *
30  */
31 #ifndef __ASM_AU1000_DMA_H
32 #define __ASM_AU1000_DMA_H
33 
34 #include <linux/config.h>
35 #include <asm/io.h>		/* need byte IO */
36 #include <linux/spinlock.h>	/* And spinlocks */
37 #include <linux/delay.h>
38 #include <asm/system.h>
39 
40 #define NUM_AU1000_DMA_CHANNELS	8
41 
42 /* DMA Channel Base Addresses */
43 #define DMA_CHANNEL_BASE	0xB4002000
44 #define DMA_CHANNEL_LEN		0x00000100
45 
46 /* DMA Channel Register Offsets */
47 #define DMA_MODE_SET		0x00000000
48 #define DMA_MODE_READ		DMA_MODE_SET
49 #define DMA_MODE_CLEAR		0x00000004
50 /* DMA Mode register bits follow */
51 #define DMA_DAH_MASK		(0x0f << 20)
52 #define DMA_DID_BIT		16
53 #define DMA_DID_MASK		(0x0f << DMA_DID_BIT)
54 #define DMA_DS			(1<<15)
55 #define DMA_BE			(1<<13)
56 #define DMA_DR			(1<<12)
57 #define DMA_TS8			(1<<11)
58 #define DMA_DW_BIT		9
59 #define DMA_DW_MASK		(0x03 << DMA_DW_BIT)
60 #define DMA_DW8			(0 << DMA_DW_BIT)
61 #define DMA_DW16		(1 << DMA_DW_BIT)
62 #define DMA_DW32		(2 << DMA_DW_BIT)
63 #define DMA_NC			(1<<8)
64 #define DMA_IE			(1<<7)
65 #define DMA_HALT		(1<<6)
66 #define DMA_GO			(1<<5)
67 #define DMA_AB			(1<<4)
68 #define DMA_D1			(1<<3)
69 #define DMA_BE1			(1<<2)
70 #define DMA_D0			(1<<1)
71 #define DMA_BE0			(1<<0)
72 
73 #define DMA_PERIPHERAL_ADDR       0x00000008
74 #define DMA_BUFFER0_START         0x0000000C
75 #define DMA_BUFFER1_START         0x00000014
76 #define DMA_BUFFER0_COUNT         0x00000010
77 #define DMA_BUFFER1_COUNT         0x00000018
78 #define DMA_BAH_BIT 16
79 #define DMA_BAH_MASK (0x0f << DMA_BAH_BIT)
80 #define DMA_COUNT_BIT 0
81 #define DMA_COUNT_MASK (0xffff << DMA_COUNT_BIT)
82 
83 /* DMA Device ID's follow */
84 enum {
85 	DMA_ID_UART0_TX = 0,
86 	DMA_ID_UART0_RX,
87 	DMA_ID_GP04,
88 	DMA_ID_GP05,
89 	DMA_ID_AC97C_TX,
90 	DMA_ID_AC97C_RX,
91 	DMA_ID_UART3_TX,
92 	DMA_ID_UART3_RX,
93 	DMA_ID_USBDEV_EP0_RX,
94 	DMA_ID_USBDEV_EP0_TX,
95 	DMA_ID_USBDEV_EP2_TX,
96 	DMA_ID_USBDEV_EP3_TX,
97 	DMA_ID_USBDEV_EP4_RX,
98 	DMA_ID_USBDEV_EP5_RX,
99 	DMA_ID_I2S_TX,
100 	DMA_ID_I2S_RX,
101 	DMA_NUM_DEV
102 };
103 
104 /* DMA Device ID's for 2nd bank (AU1100) follow */
105 enum {
106 	DMA_ID_SD0_TX = 0,
107 	DMA_ID_SD0_RX,
108 	DMA_ID_SD1_TX,
109 	DMA_ID_SD1_RX,
110 	DMA_NUM_DEV_BANK2
111 };
112 
113 struct dma_chan {
114 	int dev_id;		// this channel is allocated if >=0, free otherwise
115 	unsigned int io;
116 	const char *dev_str;
117 	int irq;
118 	void *irq_dev;
119 	unsigned int fifo_addr;
120 	unsigned int mode;
121 };
122 
123 /* These are in arch/mips/au1000/common/dma.c */
124 extern struct dma_chan au1000_dma_table[];
125 extern int request_au1000_dma(int dev_id,
126 			      const char *dev_str,
127 			      void (*irqhandler)(int, void *,
128 						 struct pt_regs *),
129 			      unsigned long irqflags,
130 			      void *irq_dev_id);
131 extern void free_au1000_dma(unsigned int dmanr);
132 extern int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
133 				int length, int *eof, void *data);
134 extern void dump_au1000_dma_channel(unsigned int dmanr);
135 extern spinlock_t au1000_dma_spin_lock;
136 
137 
get_dma_chan(unsigned int dmanr)138 static __inline__ struct dma_chan *get_dma_chan(unsigned int dmanr)
139 {
140 	if (dmanr >= NUM_AU1000_DMA_CHANNELS
141 	    || au1000_dma_table[dmanr].dev_id < 0)
142 		return NULL;
143 	return &au1000_dma_table[dmanr];
144 }
145 
claim_dma_lock(void)146 static __inline__ unsigned long claim_dma_lock(void)
147 {
148 	unsigned long flags;
149 	spin_lock_irqsave(&au1000_dma_spin_lock, flags);
150 	return flags;
151 }
152 
/* Release the global DMA spinlock taken by claim_dma_lock(), restoring
 * the interrupt state saved in @flags. */
static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&au1000_dma_spin_lock, flags);
}
157 
158 /*
159  * Set the DMA buffer enable bits in the mode register.
160  */
enable_dma_buffer0(unsigned int dmanr)161 static __inline__ void enable_dma_buffer0(unsigned int dmanr)
162 {
163 	struct dma_chan *chan = get_dma_chan(dmanr);
164 	if (!chan)
165 		return;
166 	au_writel(DMA_BE0, chan->io + DMA_MODE_SET);
167 }
enable_dma_buffer1(unsigned int dmanr)168 static __inline__ void enable_dma_buffer1(unsigned int dmanr)
169 {
170 	struct dma_chan *chan = get_dma_chan(dmanr);
171 	if (!chan)
172 		return;
173 	au_writel(DMA_BE1, chan->io + DMA_MODE_SET);
174 }
enable_dma_buffers(unsigned int dmanr)175 static __inline__ void enable_dma_buffers(unsigned int dmanr)
176 {
177 	struct dma_chan *chan = get_dma_chan(dmanr);
178 	if (!chan)
179 		return;
180 	au_writel(DMA_BE0 | DMA_BE1, chan->io + DMA_MODE_SET);
181 }
182 
start_dma(unsigned int dmanr)183 static __inline__ void start_dma(unsigned int dmanr)
184 {
185 	struct dma_chan *chan = get_dma_chan(dmanr);
186 	if (!chan)
187 		return;
188 
189 	au_writel(DMA_GO, chan->io + DMA_MODE_SET);
190 }
191 
192 #define DMA_HALT_POLL 0x5000
193 
halt_dma(unsigned int dmanr)194 static __inline__ void halt_dma(unsigned int dmanr)
195 {
196 	struct dma_chan *chan = get_dma_chan(dmanr);
197 	int i;
198 	if (!chan)
199 		return;
200 
201 	au_writel(DMA_GO, chan->io + DMA_MODE_CLEAR);
202 	// poll the halt bit
203 	for (i = 0; i < DMA_HALT_POLL; i++)
204 		if (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT)
205 			break;
206 	if (i == DMA_HALT_POLL)
207 		printk(KERN_INFO "halt_dma: HALT poll expired!\n");
208 }
209 
210 
/* Halt a channel and then clear its mode register. */
static __inline__ void disable_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	if (!chan)
		return;

	/* Stop any transfer in progress first. */
	halt_dma(dmanr);

	// now we can disable the buffers
	/* NOTE(review): writing ~DMA_GO to MODE_CLEAR clears every mode
	 * bit except GO (GO was already cleared by halt_dma above), i.e.
	 * this wipes the whole mode register, not just the BE0/BE1
	 * buffer enables -- confirm this is intended. */
	au_writel(~DMA_GO, chan->io + DMA_MODE_CLEAR);
}
222 
dma_halted(unsigned int dmanr)223 static __inline__ int dma_halted(unsigned int dmanr)
224 {
225 	struct dma_chan *chan = get_dma_chan(dmanr);
226 	if (!chan)
227 		return 1;
228 	return (au_readl(chan->io + DMA_MODE_READ) & DMA_HALT) ? 1 : 0;
229 }
230 
/* Initialize a DMA channel: program the device FIFO address and load
 * the channel's software mode word into the hardware mode register. */
static __inline__ void init_dma(unsigned int dmanr)
{
	struct dma_chan *chan = get_dma_chan(dmanr);
	u32 mode;
	if (!chan)
		return;

	/* Make sure the channel is halted before reprogramming it. */
	disable_dma(dmanr);

	// set device FIFO address (hardware wants the physical address)
	au_writel(PHYSADDR(chan->fifo_addr),
		  chan->io + DMA_PERIPHERAL_ADDR);

	mode = chan->mode | (chan->dev_id << DMA_DID_BIT);
	if (chan->irq)
		mode |= DMA_IE;	/* enable interrupts only if an IRQ is wired up */

	/* Clear the complement first so the register ends up exactly == mode. */
	au_writel(~mode, chan->io + DMA_MODE_CLEAR);
	au_writel(mode, chan->io + DMA_MODE_SET);
}
252 
253 /*
254  * set mode for a specific DMA channel
255  */
set_dma_mode(unsigned int dmanr,unsigned int mode)256 static __inline__ void set_dma_mode(unsigned int dmanr, unsigned int mode)
257 {
258 	struct dma_chan *chan = get_dma_chan(dmanr);
259 	if (!chan)
260 		return;
261 	/*
262 	 * set_dma_mode is only allowed to change endianess, direction,
263 	 * transfer size, device FIFO width, and coherency settings.
264 	 * Make sure anything else is masked off.
265 	 */
266 	mode &= (DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
267 	chan->mode &= ~(DMA_BE | DMA_DR | DMA_TS8 | DMA_DW_MASK | DMA_NC);
268 	chan->mode |= mode;
269 }
270 
get_dma_mode(unsigned int dmanr)271 static __inline__ unsigned int get_dma_mode(unsigned int dmanr)
272 {
273 	struct dma_chan *chan = get_dma_chan(dmanr);
274 	if (!chan)
275 		return 0;
276 	return chan->mode;
277 }
278 
get_dma_active_buffer(unsigned int dmanr)279 static __inline__ int get_dma_active_buffer(unsigned int dmanr)
280 {
281 	struct dma_chan *chan = get_dma_chan(dmanr);
282 	if (!chan)
283 		return -1;
284 	return (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ? 1 : 0;
285 }
286 
287 
288 /*
289  * set the device FIFO address for a specific DMA channel - only
290  * applicable to GPO4 and GPO5. All the other devices have fixed
291  * FIFO addresses.
292  */
set_dma_fifo_addr(unsigned int dmanr,unsigned int a)293 static __inline__ void set_dma_fifo_addr(unsigned int dmanr,
294 					 unsigned int a)
295 {
296 	struct dma_chan *chan = get_dma_chan(dmanr);
297 	if (!chan)
298 		return;
299 
300 	if (chan->mode & DMA_DS)	/* second bank of device ids */
301 		return;
302 
303 	if (chan->dev_id != DMA_ID_GP04 && chan->dev_id != DMA_ID_GP05)
304 		return;
305 
306 	au_writel(PHYSADDR(a), chan->io + DMA_PERIPHERAL_ADDR);
307 }
308 
309 /*
310  * Clear the DMA buffer done bits in the mode register.
311  */
clear_dma_done0(unsigned int dmanr)312 static __inline__ void clear_dma_done0(unsigned int dmanr)
313 {
314 	struct dma_chan *chan = get_dma_chan(dmanr);
315 	if (!chan)
316 		return;
317 	au_writel(DMA_D0, chan->io + DMA_MODE_CLEAR);
318 }
clear_dma_done1(unsigned int dmanr)319 static __inline__ void clear_dma_done1(unsigned int dmanr)
320 {
321 	struct dma_chan *chan = get_dma_chan(dmanr);
322 	if (!chan)
323 		return;
324 	au_writel(DMA_D1, chan->io + DMA_MODE_CLEAR);
325 }
326 
/*
 * This does nothing - not applicable to Au1000 DMA.
 */
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
	/* Intentionally empty: kept only for interface compatibility. */
}
333 
334 /*
335  * Set Buffer 0 transfer address for specific DMA channel.
336  */
set_dma_addr0(unsigned int dmanr,unsigned int a)337 static __inline__ void set_dma_addr0(unsigned int dmanr, unsigned int a)
338 {
339 	struct dma_chan *chan = get_dma_chan(dmanr);
340 	if (!chan)
341 		return;
342 	au_writel(a, chan->io + DMA_BUFFER0_START);
343 }
344 
345 /*
346  * Set Buffer 1 transfer address for specific DMA channel.
347  */
set_dma_addr1(unsigned int dmanr,unsigned int a)348 static __inline__ void set_dma_addr1(unsigned int dmanr, unsigned int a)
349 {
350 	struct dma_chan *chan = get_dma_chan(dmanr);
351 	if (!chan)
352 		return;
353 	au_writel(a, chan->io + DMA_BUFFER1_START);
354 }
355 
356 
357 /*
358  * Set Buffer 0 transfer size (max 64k) for a specific DMA channel.
359  */
set_dma_count0(unsigned int dmanr,unsigned int count)360 static __inline__ void set_dma_count0(unsigned int dmanr,
361 				      unsigned int count)
362 {
363 	struct dma_chan *chan = get_dma_chan(dmanr);
364 	if (!chan)
365 		return;
366 	count &= DMA_COUNT_MASK;
367 	au_writel(count, chan->io + DMA_BUFFER0_COUNT);
368 }
369 
370 /*
371  * Set Buffer 1 transfer size (max 64k) for a specific DMA channel.
372  */
set_dma_count1(unsigned int dmanr,unsigned int count)373 static __inline__ void set_dma_count1(unsigned int dmanr,
374 				      unsigned int count)
375 {
376 	struct dma_chan *chan = get_dma_chan(dmanr);
377 	if (!chan)
378 		return;
379 	count &= DMA_COUNT_MASK;
380 	au_writel(count, chan->io + DMA_BUFFER1_COUNT);
381 }
382 
383 /*
384  * Set both buffer transfer sizes (max 64k) for a specific DMA channel.
385  */
set_dma_count(unsigned int dmanr,unsigned int count)386 static __inline__ void set_dma_count(unsigned int dmanr,
387 				     unsigned int count)
388 {
389 	struct dma_chan *chan = get_dma_chan(dmanr);
390 	if (!chan)
391 		return;
392 	count &= DMA_COUNT_MASK;
393 	au_writel(count, chan->io + DMA_BUFFER0_COUNT);
394 	au_writel(count, chan->io + DMA_BUFFER1_COUNT);
395 }
396 
397 /*
398  * Returns which buffer has its done bit set in the mode register.
399  * Returns -1 if neither or both done bits set.
400  */
get_dma_buffer_done(unsigned int dmanr)401 static __inline__ unsigned int get_dma_buffer_done(unsigned int dmanr)
402 {
403 	struct dma_chan *chan = get_dma_chan(dmanr);
404 	if (!chan)
405 		return 0;
406 
407     return au_readl(chan->io + DMA_MODE_READ) & (DMA_D0 | DMA_D1);
408 }
409 
410 
411 /*
412  * Returns the DMA channel's Buffer Done IRQ number.
413  */
get_dma_done_irq(unsigned int dmanr)414 static __inline__ int get_dma_done_irq(unsigned int dmanr)
415 {
416 	struct dma_chan *chan = get_dma_chan(dmanr);
417 	if (!chan)
418 		return -1;
419 
420 	return chan->irq;
421 }
422 
423 /*
424  * Get DMA residue count. Returns the number of _bytes_ left to transfer.
425  */
get_dma_residue(unsigned int dmanr)426 static __inline__ int get_dma_residue(unsigned int dmanr)
427 {
428 	int curBufCntReg, count;
429 	struct dma_chan *chan = get_dma_chan(dmanr);
430 	if (!chan)
431 		return 0;
432 
433 	curBufCntReg = (au_readl(chan->io + DMA_MODE_READ) & DMA_AB) ?
434 	    DMA_BUFFER1_COUNT : DMA_BUFFER0_COUNT;
435 
436 	count = au_readl(chan->io + curBufCntReg) & DMA_COUNT_MASK;
437 
438 	if ((chan->mode & DMA_DW_MASK) == DMA_DW16)
439 		count <<= 1;
440 	else if ((chan->mode & DMA_DW_MASK) == DMA_DW32)
441 		count <<= 2;
442 
443 	return count;
444 }
445 
446 #endif /* __ASM_AU1000_DMA_H */
447 
448