#ifndef _H8300_IO_H
#define _H8300_IO_H

#ifdef __KERNEL__

#include <asm/virtconvert.h>

#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
#include <asm/regs306x.h>
#elif defined(CONFIG_H8S2678)
#include <asm/regs267x.h>
#else
#error UNKNOWN CPU TYPE
#endif


/*
 * These are for ISA/PCI shared memory _only_ and should never be used
 * on any other type of memory, including Zorro memory. They are meant to
 * access the bus in the bus byte order, which is little-endian.
 *
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the H8/300 we just read/write the memory location
 * directly.
 */
/* ++roman: The assignments to temporary variables prevent gcc from
 * sometimes generating two accesses to memory, which may be undesirable
 * for some devices.
 */

/*
 * swap functions are sometimes needed to interface little-endian hardware
 */

static inline unsigned short _swapw(volatile unsigned short v)
{
#ifndef H8300_IO_NOSWAP
	unsigned short r;
	__asm__("xor.b %w0,%x0\n\t"
		"xor.b %x0,%w0\n\t"
		"xor.b %w0,%x0"
		:"=r"(r)
		:"0"(v));
	return r;
#else
	return v;
#endif
}

static inline unsigned long _swapl(volatile unsigned long v)
{
#ifndef H8300_IO_NOSWAP
	unsigned long r;
	__asm__("xor.b %w0,%x0\n\t"
		"xor.b %x0,%w0\n\t"
		"xor.b %w0,%x0\n\t"
		"xor.w %e0,%f0\n\t"
		"xor.w %f0,%e0\n\t"
		"xor.w %e0,%f0\n\t"
		"xor.b %w0,%x0\n\t"
		"xor.b %x0,%w0\n\t"
		"xor.b %w0,%x0"
		:"=r"(r)
		:"0"(v));
	return r;
#else
	return v;
#endif
}
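
/*
 * Illustrative note (not part of the original header): unless
 * H8300_IO_NOSWAP is defined, the xor sequences above byte-reverse the
 * value in a register, e.g.
 *
 *	_swapw(0x1234)     == 0x3412
 *	_swapl(0x12345678) == 0x78563412
 *
 * %w0/%x0 select the two bytes of the low word and %e0/%f0 the two 16-bit
 * halves of the register, so each group of three xor instructions is the
 * classic three-XOR in-place swap.
 */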

#define readb(addr) \
	({ unsigned char __v = \
	 *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \
	 __v; })
#define readw(addr) \
	({ unsigned short __v = \
	 *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \
	 __v; })
#define readl(addr) \
	({ unsigned long __v = \
	 *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \
	 __v; })

#define writeb(b,addr) (void)((*(volatile unsigned char *) \
			((unsigned long)(addr) & 0x00ffffff)) = (b))
#define writew(b,addr) (void)((*(volatile unsigned short *) \
			((unsigned long)(addr) & 0x00ffffff)) = (b))
#define writel(b,addr) (void)((*(volatile unsigned long *) \
			((unsigned long)(addr) & 0x00ffffff)) = (b))
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)

#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
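
/*
 * Usage sketch (added for illustration, not part of the original header):
 * every accessor above masks its address down to 24 bits, i.e. a 16 Mbyte
 * space.  A driver would do something like the following, where DEV_BASE
 * is a hypothetical device address:
 *
 *	#define DEV_BASE 0xf80000
 *	unsigned short status = readw(DEV_BASE + 2);	16-bit read
 *	writeb(0x01, DEV_BASE);				8-bit write
 *
 * These accessors read and write the bus directly in bus (little-endian)
 * byte order; the inw()/inl() and outw()/outl() macros further down apply
 * _swapw()/_swapl() when the value is wanted in CPU byte order.
 */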

static inline int h8300_buswidth(unsigned int addr)
{
	return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
}
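
/*
 * Note (added for clarity): ABWCR is the bus width control register; each
 * bit selects the width of one 2 Mbyte external area, and (addr >> 21) & 7
 * picks the bit for the area containing addr.  h8300_buswidth() returns
 * non-zero for addresses in a 16-bit-bus area (ABWCR bit clear), which is
 * why the byte-wide helpers below and inb()/outb() fall back to word
 * accesses there.
 */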

static inline void io_outsb(unsigned int addr, const void *buf, int len)
{
	volatile unsigned char *ap_b = (volatile unsigned char *) addr;
	volatile unsigned short *ap_w = (volatile unsigned short *) addr;
	unsigned char *bp = (unsigned char *) buf;

	if(h8300_buswidth(addr) && (addr & 1)) {
		while (len--)
			*ap_w = *bp++;
	} else {
		while (len--)
			*ap_b = *bp++;
	}
}

static inline void io_outsw(unsigned int addr, const void *buf, int len)
{
	volatile unsigned short *ap = (volatile unsigned short *) addr;
	unsigned short *bp = (unsigned short *) buf;
	while (len--)
		*ap = _swapw(*bp++);
}

static inline void io_outsl(unsigned int addr, const void *buf, int len)
{
	volatile unsigned long *ap = (volatile unsigned long *) addr;
	unsigned long *bp = (unsigned long *) buf;
	while (len--)
		*ap = _swapl(*bp++);
}

static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len)
{
	volatile unsigned short *ap = (volatile unsigned short *) addr;
	unsigned short *bp = (unsigned short *) buf;
	while (len--)
		*ap = *bp++;
}

static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len)
{
	volatile unsigned long *ap = (volatile unsigned long *) addr;
	unsigned long *bp = (unsigned long *) buf;
	while (len--)
		*ap = *bp++;
}

static inline void io_insb(unsigned int addr, void *buf, int len)
{
	volatile unsigned char *ap_b;
	volatile unsigned short *ap_w;
	unsigned char *bp = (unsigned char *) buf;

	if(h8300_buswidth(addr)) {
		ap_w = (volatile unsigned short *)(addr & ~1);
		while (len--)
			*bp++ = *ap_w & 0xff;
	} else {
		ap_b = (volatile unsigned char *)addr;
		while (len--)
			*bp++ = *ap_b;
	}
}

static inline void io_insw(unsigned int addr, void *buf, int len)
{
	volatile unsigned short *ap = (volatile unsigned short *) addr;
	unsigned short *bp = (unsigned short *) buf;
	while (len--)
		*bp++ = _swapw(*ap);
}

static inline void io_insl(unsigned int addr, void *buf, int len)
{
	volatile unsigned long *ap = (volatile unsigned long *) addr;
	unsigned long *bp = (unsigned long *) buf;
	while (len--)
		*bp++ = _swapl(*ap);
}

static inline void io_insw_noswap(unsigned int addr, void *buf, int len)
{
	volatile unsigned short *ap = (volatile unsigned short *) addr;
	unsigned short *bp = (unsigned short *) buf;
	while (len--)
		*bp++ = *ap;
}

static inline void io_insl_noswap(unsigned int addr, void *buf, int len)
{
	volatile unsigned long *ap = (volatile unsigned long *) addr;
	unsigned long *bp = (unsigned long *) buf;
	while (len--)
		*bp++ = *ap;
}

/*
 * make the short names macros so specific devices
 * can override them as required
 */

#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))

#define mmiowb()

#define inb(addr)    ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr))
#define inw(addr)    _swapw(readw(addr))
#define inl(addr)    _swapl(readl(addr))
#define outb(x,addr) ((void)((h8300_buswidth(addr) && \
		      ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr)))
#define outw(x,addr) ((void) writew(_swapw(x),addr))
#define outl(x,addr) ((void) writel(_swapl(x),addr))

#define inb_p(addr)    inb(addr)
#define inw_p(addr)    inw(addr)
#define inl_p(addr)    inl(addr)
#define outb_p(x,addr) outb(x,addr)
#define outw_p(x,addr) outw(x,addr)
#define outl_p(x,addr) outl(x,addr)

#define outsb(a,b,l) io_outsb(a,b,l)
#define outsw(a,b,l) io_outsw(a,b,l)
#define outsl(a,b,l) io_outsl(a,b,l)

#define insb(a,b,l) io_insb(a,b,l)
#define insw(a,b,l) io_insw(a,b,l)
#define insl(a,b,l) io_insl(a,b,l)
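
/*
 * Usage sketch (added for illustration): these macros provide ISA-style
 * port I/O on top of the memory-mapped accessors.  inw()/inl() and
 * outw()/outl() byte-swap, so drivers for little-endian hardware see
 * values in CPU order, while inb()/outb() fall back to 16-bit accesses on
 * 16-bit bus areas.  The port addresses below are hypothetical:
 *
 *	unsigned short id = inw(0xf80000);	swapped 16-bit read
 *	outb(0x10, 0xf80001);			byte write on any bus width
 *	unsigned char buf[16];
 *	insb(0xf80000, buf, sizeof(buf));	string input via io_insb()
 */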
235
236 #define IO_SPACE_LIMIT 0xffffff
237
238
239 /* Values for nocacheflag and cmode */
240 #define IOMAP_FULL_CACHING 0
241 #define IOMAP_NOCACHE_SER 1
242 #define IOMAP_NOCACHE_NONSER 2
243 #define IOMAP_WRITETHROUGH 3
244
245 extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
246 extern void __iounmap(void *addr, unsigned long size);
247
ioremap(unsigned long physaddr,unsigned long size)248 static inline void *ioremap(unsigned long physaddr, unsigned long size)
249 {
250 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
251 }
ioremap_nocache(unsigned long physaddr,unsigned long size)252 static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
253 {
254 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
255 }
ioremap_writethrough(unsigned long physaddr,unsigned long size)256 static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
257 {
258 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
259 }
ioremap_fullcache(unsigned long physaddr,unsigned long size)260 static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
261 {
262 return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
263 }
264
265 extern void iounmap(void *addr);
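
/*
 * Usage sketch (added for illustration): a driver maps its device before
 * touching it and unmaps it on removal.  The physical address and size are
 * hypothetical:
 *
 *	void *regs = ioremap(0x200000, 0x100);
 *	if (regs) {
 *		writeb(0x01, (unsigned long)regs);
 *		iounmap(regs);
 *	}
 *
 * Note that ioremap() and ioremap_nocache() both map with
 * IOMAP_NOCACHE_SER; the _writethrough and _fullcache variants are for
 * memory that is safe to cache.
 */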

/* H8/300 internal I/O functions */
static __inline__ unsigned char ctrl_inb(unsigned long addr)
{
	return *(volatile unsigned char*)addr;
}

static __inline__ unsigned short ctrl_inw(unsigned long addr)
{
	return *(volatile unsigned short*)addr;
}

static __inline__ unsigned long ctrl_inl(unsigned long addr)
{
	return *(volatile unsigned long*)addr;
}

static __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char*)addr = b;
}

static __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short*)addr = b;
}

static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
{
	*(volatile unsigned long*)addr = b;
}

static __inline__ void ctrl_bclr(int b, unsigned long addr)
{
	if (__builtin_constant_p(b))
		switch (b) {
		case 0: __asm__("bclr #0,@%0"::"r"(addr)); break;
		case 1: __asm__("bclr #1,@%0"::"r"(addr)); break;
		case 2: __asm__("bclr #2,@%0"::"r"(addr)); break;
		case 3: __asm__("bclr #3,@%0"::"r"(addr)); break;
		case 4: __asm__("bclr #4,@%0"::"r"(addr)); break;
		case 5: __asm__("bclr #5,@%0"::"r"(addr)); break;
		case 6: __asm__("bclr #6,@%0"::"r"(addr)); break;
		case 7: __asm__("bclr #7,@%0"::"r"(addr)); break;
		}
	else
		__asm__("bclr %w0,@%1"::"r"(b), "r"(addr));
}

static __inline__ void ctrl_bset(int b, unsigned long addr)
{
	if (__builtin_constant_p(b))
		switch (b) {
		case 0: __asm__("bset #0,@%0"::"r"(addr)); break;
		case 1: __asm__("bset #1,@%0"::"r"(addr)); break;
		case 2: __asm__("bset #2,@%0"::"r"(addr)); break;
		case 3: __asm__("bset #3,@%0"::"r"(addr)); break;
		case 4: __asm__("bset #4,@%0"::"r"(addr)); break;
		case 5: __asm__("bset #5,@%0"::"r"(addr)); break;
		case 6: __asm__("bset #6,@%0"::"r"(addr)); break;
		case 7: __asm__("bset #7,@%0"::"r"(addr)); break;
		}
	else
		__asm__("bset %w0,@%1"::"r"(b), "r"(addr));
}
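
/*
 * Usage sketch (added for illustration): the ctrl_inb/ctrl_outb family
 * above is for on-chip peripheral registers rather than external bus
 * devices, and ctrl_bclr()/ctrl_bset() compile to a single bclr/bset
 * instruction when the bit number is a compile-time constant.  P1DR below
 * stands for a hypothetical port data register address:
 *
 *	unsigned char v = ctrl_inb(P1DR);	read the 8-bit register
 *	ctrl_outb(v | 0x80, P1DR);		write it back
 *	ctrl_bset(0, P1DR);			set bit 0 in place
 *	ctrl_bclr(0, P1DR);			clear bit 0 in place
 */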

/* Pages to physical address... */
#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page)	((page - mem_map) << PAGE_SHIFT)

/*
 * Macros used for converting between virtual and physical mappings.
 */
#define phys_to_virt(vaddr)	((void *) (vaddr))
#define virt_to_phys(vaddr)	((unsigned long) (vaddr))

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* _H8300_IO_H */