#ifdef __KERNEL__
#ifndef _PPC_IO_H
#define _PPC_IO_H

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/byteorder.h>

#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399

#define SLOW_DOWN_IO

#define PMAC_ISA_MEM_BASE 0
#define PMAC_PCI_DRAM_OFFSET 0
#define CHRP_ISA_IO_BASE 0xf8000000
#define CHRP_ISA_MEM_BASE 0xf7000000
#define CHRP_PCI_DRAM_OFFSET 0
#define PREP_ISA_IO_BASE 0x80000000
#define PREP_ISA_MEM_BASE 0xc0000000
#define PREP_PCI_DRAM_OFFSET 0x80000000

#if defined(CONFIG_40x)
#include <asm/ibm4xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
#include <asm/mpc8260.h>
#elif defined(CONFIG_APUS)
#define _IO_BASE 0
#define _ISA_MEM_BASE 0
#define PCI_DRAM_OFFSET 0
#else /* Everyone else */
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
#endif /* Platform-dependent I/O */

extern unsigned long isa_io_base;
extern unsigned long isa_mem_base;
extern unsigned long pci_dram_offset;

#define readb(addr) in_8((volatile u8 *)(addr))
#define writeb(b,addr) out_8((volatile u8 *)(addr), (b))
#if defined(CONFIG_APUS)
#define readw(addr) (*(volatile u16 *) (addr))
#define readl(addr) (*(volatile u32 *) (addr))
#define writew(b,addr) ((*(volatile u16 *) (addr)) = (b))
#define writel(b,addr) ((*(volatile u32 *) (addr)) = (b))
#else
#define readw(addr) in_le16((volatile u16 *)(addr))
#define readl(addr) in_le32((volatile u32 *)(addr))
#define writew(b,addr) out_le16((volatile u16 *)(addr),(b))
#define writel(b,addr) out_le32((volatile u32 *)(addr),(b))
#endif
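/*
 * The __raw_* accessors below do native-endian loads and stores with
 * no byte-swapping and no ordering barriers; callers must insert
 * their own eieio/sync where ordering matters.
 */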
#define __raw_readb(addr) (*(volatile unsigned char *)(addr))
#define __raw_readw(addr) (*(volatile unsigned short *)(addr))
#define __raw_readl(addr) (*(volatile unsigned int *)(addr))
#define __raw_writeb(v, addr) (*(volatile unsigned char *)(addr) = (v))
#define __raw_writew(v, addr) (*(volatile unsigned short *)(addr) = (v))
#define __raw_writel(v, addr) (*(volatile unsigned int *)(addr) = (v))

/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * that case.  - paulus
 */
#define insb(port, buf, ns) _insb((u8 *)((port)+_IO_BASE), (buf), (ns))
#define outsb(port, buf, ns) _outsb((u8 *)((port)+_IO_BASE), (buf), (ns))
#define insw(port, buf, ns) _insw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define outsw(port, buf, ns) _outsw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define insl(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
#define outsl(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
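
/*
 * Example (hypothetical caller, not part of this header): pulling a
 * 512-byte block from a 16-bit data port with insw.  The port number
 * 0x1f0 is illustrative only.
 *
 *	u16 buf[256];
 *
 *	insw(0x1f0, buf, 256);
 */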

#ifdef CONFIG_ALL_PPC
/*
 * On powermacs, we will get a machine check exception if we
 * try to read data from a non-existent I/O port.  Because the
 * machine check is an asynchronous exception, it isn't
 * well-defined which instruction SRR0 will point to when the
 * exception occurs.
 * With the sequence below (twi; isync; nop), we have found that
 * the machine check occurs on one of the three instructions on
 * all PPC implementations tested so far.  The twi and isync are
 * needed on the 601 (in fact twi; sync works too), the isync and
 * nop are needed on 604[e|r], and any of twi, sync or isync will
 * work on 603[e], 750, 74x0.
 * The twi creates an explicit data dependency on the returned
 * value which seems to be needed to make the 601 wait for the
 * load to finish.
 */

#define __do_in_asm(name, op) \
extern __inline__ unsigned int name(unsigned int port) \
{ \
	unsigned int x; \
	__asm__ __volatile__( \
		op " %0,0,%1\n" \
		"1: twi 0,%0,0\n" \
		"2: isync\n" \
		"3: nop\n" \
		"4:\n" \
		".section .fixup,\"ax\"\n" \
		"5: li %0,-1\n" \
		"   b 4b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n" \
		"   .align 2\n" \
		"   .long 1b,5b\n" \
		"   .long 2b,5b\n" \
		"   .long 3b,5b\n" \
		".previous" \
		: "=&r" (x) \
		: "r" (port + _IO_BASE)); \
	return x; \
}

#define __do_out_asm(name, op) \
extern __inline__ void name(unsigned int val, unsigned int port) \
{ \
	__asm__ __volatile__( \
		op " %0,0,%1\n" \
		"1: sync\n" \
		"2:\n" \
		".section __ex_table,\"a\"\n" \
		"   .align 2\n" \
		"   .long 1b,2b\n" \
		".previous" \
		: : "r" (val), "r" (port + _IO_BASE)); \
}

__do_in_asm(inb, "lbzx")
__do_in_asm(inw, "lhbrx")
__do_in_asm(inl, "lwbrx")
__do_out_asm(outb, "stbx")
__do_out_asm(outw, "sthbrx")
__do_out_asm(outl, "stwbrx")

#elif defined(CONFIG_APUS)
#define inb(port) in_8((u8 *)((port)+_IO_BASE))
#define outb(val, port) out_8((u8 *)((port)+_IO_BASE), (val))
#define inw(port) in_be16((u16 *)((port)+_IO_BASE))
#define outw(val, port) out_be16((u16 *)((port)+_IO_BASE), (val))
#define inl(port) in_be32((u32 *)((port)+_IO_BASE))
#define outl(val, port) out_be32((u32 *)((port)+_IO_BASE), (val))

#else /* not APUS or ALL_PPC */
#define inb(port) in_8((u8 *)((port)+_IO_BASE))
#define outb(val, port) out_8((u8 *)((port)+_IO_BASE), (val))
#define inw(port) in_le16((u16 *)((port)+_IO_BASE))
#define outw(val, port) out_le16((u16 *)((port)+_IO_BASE), (val))
#define inl(port) in_le32((u32 *)((port)+_IO_BASE))
#define outl(val, port) out_le32((u32 *)((port)+_IO_BASE), (val))
#endif

#define inb_p(port) inb((port))
#define outb_p(val, port) outb((val), (port))
#define inw_p(port) inw((port))
#define outw_p(val, port) outw((val), (port))
#define inl_p(port) inl((port))
#define outl_p(val, port) outl((val), (port))
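
/*
 * Example (hedged sketch of caller code): using the SIO_CONFIG_RA/RD
 * index/data pair defined above to read one Super-I/O configuration
 * register.  The register index 0x20 is illustrative only.
 *
 *	u8 val;
 *
 *	outb(0x20, SIO_CONFIG_RA);
 *	val = inb(SIO_CONFIG_RD);
 */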

extern void _insb(volatile u8 *port, void *buf, int ns);
extern void _outsb(volatile u8 *port, const void *buf, int ns);
extern void _insw(volatile u16 *port, void *buf, int ns);
extern void _outsw(volatile u16 *port, const void *buf, int ns);
extern void _insl(volatile u32 *port, void *buf, int nl);
extern void _outsl(volatile u32 *port, const void *buf, int nl);
extern void _insw_ns(volatile u16 *port, void *buf, int ns);
extern void _outsw_ns(volatile u16 *port, const void *buf, int ns);
extern void _insl_ns(volatile u32 *port, void *buf, int nl);
extern void _outsl_ns(volatile u32 *port, const void *buf, int nl);

/*
 * The *_ns versions below don't do byte-swapping.
 * Neither do the standard versions now; these are just here
 * for older code.
 */
#define insw_ns(port, buf, ns) _insw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define outsw_ns(port, buf, ns) _outsw_ns((u16 *)((port)+_IO_BASE), (buf), (ns))
#define insl_ns(port, buf, nl) _insl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))
#define outsl_ns(port, buf, nl) _outsl_ns((u32 *)((port)+_IO_BASE), (buf), (nl))

#define IO_SPACE_LIMIT ~0

#define memset_io(a,b,c) memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))

/*
 * Map in an area of physical address space, for accessing
 * I/O devices etc.
 */
extern void *__ioremap(phys_addr_t address, unsigned long size,
		       unsigned long flags);
extern void *ioremap(phys_addr_t address, unsigned long size);
extern void *ioremap64(unsigned long long address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
extern void iounmap(void *addr);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));
extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags);
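
/*
 * Example (hypothetical driver code): mapping a device's register
 * block and accessing it with readl/writel.  DEV_PHYS, DEV_SIZE and
 * the 0x10 offset are made-up values for illustration.
 *
 *	unsigned long regs = (unsigned long) ioremap(DEV_PHYS, DEV_SIZE);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x10);
 *		writel(status | 1, regs + 0x10);
 *		iounmap((void *) regs);
 *	}
 */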

/*
 * This makes sure that a value has been returned from a device
 * before any subsequent loads or stores are performed.
 */
extern inline void io_flush(int value)
{
	__asm__ __volatile__("twi 0,%0,0; isync" : : "r" (value));
}

/*
 * The PCI bus is inherently little-endian, while the PowerPC here
 * runs big-endian.  Thus all values which cross the [PCI] barrier
 * must be endian-adjusted.  Also, the local DRAM has a different
 * address from the PCI point of view; thus buffer addresses also
 * have to be modified [mapped] appropriately.
 */
extern inline unsigned long virt_to_bus(volatile void * address)
{
#ifdef CONFIG_APUS
	return (iopa((unsigned long) address) + PCI_DRAM_OFFSET);
#else
	if (address == (void *)0)
		return 0;
	return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
#endif
}

extern inline void * bus_to_virt(unsigned long address)
{
#ifdef CONFIG_APUS
	return (void*) mm_ptov (address - PCI_DRAM_OFFSET);
#else
	if (address == 0)
		return 0;
	return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
#endif
}
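
/*
 * Example (hedged sketch): handing a kernel buffer to a busmastering
 * device.  The buffer and the device register are hypothetical.
 *
 *	char *buf = kmalloc(1024, GFP_KERNEL);
 *	unsigned long bus_addr = virt_to_bus(buf);
 *
 * The device is then programmed with bus_addr, not with buf or its
 * physical address.
 */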

/*
 * Change virtual addresses to physical addresses and vice versa,
 * for addresses in the area where the kernel has the RAM mapped.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
#ifdef CONFIG_APUS
	return iopa ((unsigned long) address);
#else
	return (unsigned long) address - KERNELBASE;
#endif
}

extern inline void * phys_to_virt(unsigned long address)
{
#ifdef CONFIG_APUS
	return (void*) mm_ptov (address);
#else
	return (void *) (address + KERNELBASE);
#endif
}

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART)
#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)

/*
 * Enforce In-order Execution of I/O:
 * Acts as a barrier to ensure all previous I/O accesses have
 * completed before any further ones are issued.
 */
extern inline void eieio(void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}

/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r()  eieio()
#define iobarrier_w()  eieio()
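
/*
 * Example (hedged sketch): ordering two __raw_* stores, which carry
 * no implicit eieio of their own.  The register offsets are
 * illustrative only.
 *
 *	__raw_writel(dma_addr, regs + DMA_ADDR_REG);
 *	iobarrier_w();
 *	__raw_writel(1, regs + DMA_START_REG);
 */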

/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 *
 * Read operations have additional twi & isync to make sure the read
 * is actually performed (i.e. the data has come back) before we start
 * executing any following instructions.
 */
extern inline int in_8(volatile unsigned char *addr)
{
	int ret;

	__asm__ __volatile__(
		"lbz%U1%X1 %0,%1;\n"
		"twi 0,%0,0;\n"
		"isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_8(volatile unsigned char *addr, int val)
{
	__asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline int in_le16(volatile unsigned short *addr)
{
	int ret;

	__asm__ __volatile__("lhbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline int in_be16(volatile unsigned short *addr)
{
	int ret;

	__asm__ __volatile__("lhz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le16(volatile unsigned short *addr, int val)
{
	__asm__ __volatile__("sthbrx %1,0,%2; eieio" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be16(volatile unsigned short *addr, int val)
{
	__asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

extern inline unsigned in_le32(volatile unsigned *addr)
{
	unsigned ret;

	__asm__ __volatile__("lwbrx %0,0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) :
			     "r" (addr), "m" (*addr));
	return ret;
}

extern inline unsigned in_be32(volatile unsigned *addr)
{
	unsigned ret;

	__asm__ __volatile__("lwz%U1%X1 %0,%1;\n"
			     "twi 0,%0,0;\n"
			     "isync" : "=r" (ret) : "m" (*addr));
	return ret;
}

extern inline void out_le32(volatile unsigned *addr, int val)
{
	__asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) :
			     "r" (val), "r" (addr));
}

extern inline void out_be32(volatile unsigned *addr, int val)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val));
}

static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
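
/*
 * Example (hypothetical caller): probing for a ROM signature at an
 * ioremapped address "rom".  The signature string is illustrative.
 *
 *	if (check_signature(rom, "PCIR", 4))
 *		... signature present, go on to parse the structure ...
 */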

/* Make some pcmcia drivers happy */
static inline int isa_check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	return 0;
}

#ifdef CONFIG_NOT_COHERENT_CACHE

/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr);
extern void consistent_sync(void *vaddr, size_t size, int rw);
extern void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int rw);
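
/*
 * Example (hedged sketch): allocating a descriptor ring that both the
 * CPU and a DMA device can see on a non-snooping core.  The size and
 * names are illustrative only.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = consistent_alloc(GFP_KERNEL, 4096, &ring_dma);
 *
 *	if (ring != NULL) {
 *		... give ring_dma to the device, use ring from the CPU ...
 *		consistent_free(ring);
 *	}
 */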

#define dma_cache_inv(_start,_size) \
	invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
	clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
	flush_dcache_range(_start, (_start + _size))

#else /* ! CONFIG_NOT_COHERENT_CACHE */

/*
 * Cache coherent cores.
 */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

#define consistent_alloc(gfp, size, handle) NULL
#define consistent_free(addr) do { } while (0)
#define consistent_sync(addr, size, rw) do { } while (0)
#define consistent_sync_page(pg, off, sz, rw) do { } while (0)

#endif /* CONFIG_NOT_COHERENT_CACHE */
#endif /* _PPC_IO_H */
#endif /* __KERNEL__ */