#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Paul Mundt
 *
 */

/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have
 *   ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific I/O,
 * which are processor specific. The address should be the result of
 * onchip_remap().
 */
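
/*
 * A minimal usage sketch of the read/write side of this convention.
 * "mmio" is a hypothetical ioremap()ed PCI address and 0x10 a made-up
 * register offset; neither refers to real hardware:
 *
 *	unsigned char v = readb(mmio + 0x10);
 *	writeb(v | 0x01, mmio + 0x10);
 */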

#include <asm/cache.h>
#include <asm/system.h>

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * Nothing overly special here... instead of doing the same thing over
 * and over again, we just define a set of sh64_in/out functions with an
 * implicit size. The traditional read{b,w,l}/write{b,w,l} mess is wrapped
 * around these, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(unsigned long addr)
{
	return *(volatile unsigned char *)addr;
}

static inline unsigned short sh64_in16(unsigned long addr)
{
	return *(volatile unsigned short *)addr;
}

static inline unsigned long sh64_in32(unsigned long addr)
{
	return *(volatile unsigned long *)addr;
}

static inline unsigned long long sh64_in64(unsigned long addr)
{
	return *(volatile unsigned long long *)addr;
}

static inline void sh64_out8(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned long b, unsigned long addr)
{
	*(volatile unsigned long *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long *)addr = b;
	wmb();
}

#define readb(addr)		sh64_in8(addr)
#define readw(addr)		sh64_in16(addr)
#define readl(addr)		sh64_in32(addr)

#define writeb(b, addr)		sh64_out8(b, addr)
#define writew(b, addr)		sh64_out16(b, addr)
#define writel(b, addr)		sh64_out32(b, addr)

#define ctrl_inb(addr)		sh64_in8(addr)
#define ctrl_inw(addr)		sh64_in16(addr)
#define ctrl_inl(addr)		sh64_in32(addr)

#define ctrl_outb(b, addr)	sh64_out8(b, addr)
#define ctrl_outw(b, addr)	sh64_out16(b, addr)
#define ctrl_outl(b, addr)	sh64_out32(b, addr)

unsigned long inb(unsigned long port);
unsigned long inw(unsigned long port);
unsigned long inl(unsigned long port);
void outb(unsigned long value, unsigned long port);
void outw(unsigned long value, unsigned long port);
void outl(unsigned long value, unsigned long port);
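
/*
 * Usage sketch for the ISA-style port accessors declared above (the port
 * number 0x3f8 is purely illustrative, not a claim about any board):
 *
 *	unsigned long status = inb(0x3f8 + 5);
 *	outb(0x41, 0x3f8);
 */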

#ifdef __KERNEL__

#ifdef CONFIG_SH_CAYMAN
extern unsigned long smsc_superio_virt;
#endif
#ifdef CONFIG_PCI
extern unsigned long pciio_virt;
#endif

#define IO_SPACE_LIMIT 0xffffffff

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
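
/*
 * For example (assuming "buf" is an ordinary lowmem kernel buffer, shown
 * only as an illustration), the two conversions round-trip:
 *
 *	unsigned long phys = virt_to_phys(buf);
 *	void *virt = phys_to_virt(phys);	(virt == buf)
 */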

extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

static inline void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

static inline void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);
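
/*
 * Typical usage sketch (the physical address and size below are
 * hypothetical, chosen only to show the map/access/unmap pattern):
 *
 *	void *regs = ioremap_nocache(0xfd000000, 0x1000);
 *	if (regs) {
 *		unsigned long id = readl((unsigned long)regs);
 *		iounmap(regs);
 *	}
 */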

unsigned long onchip_remap(unsigned long addr, unsigned long size, const char *name);
extern void onchip_unmap(unsigned long vaddr);
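
/*
 * Usage sketch for the on-chip remap helpers together with the ctrl_*
 * accessors above (the peripheral address, size, and register offset
 * below are invented for the example):
 *
 *	unsigned long base = onchip_remap(0x01800000, 0x1000, "example");
 *	unsigned long v = ctrl_inl(base + 0x08);
 *	ctrl_outl(v | 0x1, base + 0x08);
 *	onchip_unmap(base);
 */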

static __inline__ int check_signature(unsigned long io_addr,
			const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
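
/*
 * For example, to verify a hypothetical 4-byte ID string at a mapped
 * address (both "base" and the string are illustrative):
 *
 *	static const unsigned char sig[] = "FOO1";
 *	if (check_signature(base, sig, 4))
 *		... device identified ...
 */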

/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache.  This can be used before DMA reads from
 *    memory.  (See the usage sketch after the implementations below.)
 */

/*
 * Implemented even though DMA is not yet supported on the ST50.
 *
 * Also note that PCI DMA is supposed to be cache coherent, and
 * therefore these should not be used by PCI device drivers.
 */

static inline void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp	%0, 0" : : "r" (s));
}

static inline void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi	%0, 0" : : "r" (s));
}

static inline void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb	%0, 0" : : "r" (s));
}
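
/*
 * Usage sketch for the helpers above ("buf" and "len" are hypothetical;
 * this is not a statement about any particular driver):
 *
 *	dma_cache_wback(buf, len);	before the device reads from buf
 *	dma_cache_inv(buf, len);	before the CPU reads data the
 *					device wrote
 *	dma_cache_wback_inv(buf, len);	for bidirectional transfers
 */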

#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IO_H */