/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_SN_IO_H
#define _ASM_SN_IO_H
#include <linux/compiler.h>
#include <asm/intrinsics.h>

extern void *sn_io_addr(unsigned long port) __attribute_const__; /* Forward declaration */
extern void __sn_mmiowb(void); /* Forward declaration */

extern int num_cnodes;

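/*
 * Ordering fence used after the uncached loads below: ia64_mfa() emits
 * the ia64 "mf.a" (memory fence, acceptance) instruction, which waits
 * until outstanding uncacheable references have been accepted.
 */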
#define __sn_mf_a()   ia64_mfa()

extern void sn_dma_flush(unsigned long);

#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
#define __sn_readb_relaxed ___sn_readb_relaxed
#define __sn_readw_relaxed ___sn_readw_relaxed
#define __sn_readl_relaxed ___sn_readl_relaxed
#define __sn_readq_relaxed ___sn_readq_relaxed

/*
 * Convenience macros for setting/clearing bits using the above accessors
 */

#define __sn_setq_relaxed(addr, val) \
	writeq((__sn_readq_relaxed(addr) | (val)), (addr))
#define __sn_clrq_relaxed(addr, val) \
	writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
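
/*
 * Illustrative usage only (widget_ctrl_reg and the bit position are
 * hypothetical, not part of this interface): set or clear a single bit
 * in a 64-bit MMIO register without any posted-DMA flush or ordering
 * guarantee:
 *
 *	__sn_setq_relaxed(widget_ctrl_reg, 1UL << 4);	// set bit 4
 *	__sn_clrq_relaxed(widget_ctrl_reg, 1UL << 4);	// clear bit 4
 */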

/*
 * The following routines are SN platform specific, called when
 * a reference is made to the inX/outX macros.  The SN inX
 * macros ensure that posted DMA writes on the Bridge are
 * flushed.
 *
 * The routines should be self-explanatory.
 */
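
/*
 * Sketch of the intended usage pattern (STATUS_PORT and dma_buffer are
 * hypothetical): after a device signals completion, a driver might do
 *
 *	status = inb(STATUS_PORT);	// ends up in ___sn_inb() on SN
 *	process(dma_buffer);
 *
 * The flush in ___sn_inb() pushes DMA writes posted in the Bridge out
 * to memory before the read result is returned to the caller.
 */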

static inline unsigned int
___sn_inb (unsigned long port)
{
	volatile unsigned char *addr;
	unsigned char ret = -1;

	if ((addr = sn_io_addr(port))) {
		ret = *addr;
		__sn_mf_a();
		sn_dma_flush((unsigned long)addr);
	}
	return ret;
}

static inline unsigned int
___sn_inw (unsigned long port)
{
	volatile unsigned short *addr;
	unsigned short ret = -1;

	if ((addr = sn_io_addr(port))) {
		ret = *addr;
		__sn_mf_a();
		sn_dma_flush((unsigned long)addr);
	}
	return ret;
}

static inline unsigned int
___sn_inl (unsigned long port)
{
	volatile unsigned int *addr;
	unsigned int ret = -1;

	if ((addr = sn_io_addr(port))) {
		ret = *addr;
		__sn_mf_a();
		sn_dma_flush((unsigned long)addr);
	}
	return ret;
}

static inline void
___sn_outb (unsigned char val, unsigned long port)
{
	volatile unsigned char *addr;

	if ((addr = sn_io_addr(port))) {
		*addr = val;
		__sn_mmiowb();
	}
}

static inline void
___sn_outw (unsigned short val, unsigned long port)
{
	volatile unsigned short *addr;

	if ((addr = sn_io_addr(port))) {
		*addr = val;
		__sn_mmiowb();
	}
}

static inline void
___sn_outl (unsigned int val, unsigned long port)
{
	volatile unsigned int *addr;

	if ((addr = sn_io_addr(port))) {
		*addr = val;
		__sn_mmiowb();
	}
}

/*
 * The following routines are SN platform specific, called when
 * a reference is made to the readX/writeX macros.  The SN readX
 * macros ensure that posted DMA writes on the Bridge are
 * flushed.
 *
 * The routines should be self-explanatory.
 */
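
/*
 * Illustrative only (regs, bar_base, bar_size and INTR_STATUS are
 * hypothetical): readl() ends up in ___sn_readl() on SN, so by the
 * time it returns, DMA writes posted in the Bridge have been flushed
 * to memory:
 *
 *	void __iomem *regs = ioremap(bar_base, bar_size);
 *	u32 intr = readl(regs + INTR_STATUS);
 */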

static inline unsigned char
___sn_readb (const volatile void __iomem *addr)
{
	unsigned char val;

	val = *(volatile unsigned char __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
	return val;
}

static inline unsigned short
___sn_readw (const volatile void __iomem *addr)
{
	unsigned short val;

	val = *(volatile unsigned short __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
	return val;
}

static inline unsigned int
___sn_readl (const volatile void __iomem *addr)
{
	unsigned int val;

	val = *(volatile unsigned int __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
	return val;
}

static inline unsigned long
___sn_readq (const volatile void __iomem *addr)
{
	unsigned long val;

	val = *(volatile unsigned long __force *)addr;
	__sn_mf_a();
	sn_dma_flush((unsigned long)addr);
	return val;
}

/*
 * For generic and SN2 kernels, we have a set of fast-access
 * PIO routines.  These are provided on the SN platform because
 * the normal inX and readX macros perform the additional task
 * of flushing posted DMA writes on the Bridge.
 *
 * These routines should be self-explanatory.
 */
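
/*
 * Use the fast variants only when the caller does not depend on posted
 * DMA having reached memory, e.g. polling a status register in a tight
 * loop (port and READY_BIT below are hypothetical):
 *
 *	while (!(sn_inb_fast(port) & READY_BIT))
 *		cpu_relax();
 */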

static inline unsigned int
sn_inb_fast (unsigned long port)
{
	volatile unsigned char *addr = (unsigned char *)port;
	unsigned char ret;

	ret = *addr;
	__sn_mf_a();
	return ret;
}

static inline unsigned int
sn_inw_fast (unsigned long port)
{
	volatile unsigned short *addr = (unsigned short *)port;
	unsigned short ret;

	ret = *addr;
	__sn_mf_a();
	return ret;
}

static inline unsigned int
sn_inl_fast (unsigned long port)
{
	volatile unsigned int *addr = (unsigned int *)port;
	unsigned int ret;

	ret = *addr;
	__sn_mf_a();
	return ret;
}

static inline unsigned char
___sn_readb_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___sn_readw_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___sn_readl_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long
___sn_readq_relaxed (const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *)addr;
}
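
/*
 * The _relaxed readers skip both the acceptance fence and the posted-DMA
 * flush; they are intended for sequences such as __sn_setq_relaxed() and
 * __sn_clrq_relaxed() above, where ordering against posted DMA does not
 * matter.
 */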

struct pci_dev;

static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
{

	if (vchan > 1) {
		return -1;
	}

	if (!(*addr >> 32))	/* Using a mask here would be cleaner */
		return 0;	/* but this generates better code */

	if (vchan == 1) {
		/* Set Bit 57 */
		*addr |= (1UL << 57);
	} else {
		/* Clear Bit 57 */
		*addr &= ~(1UL << 57);
	}

	return 0;
}
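
/*
 * Illustrative only (pdev and dma_addr are hypothetical): select virtual
 * channel 1 for a transfer by setting bit 57 of a 64-bit bus address;
 * 32-bit addresses are left untouched and the call simply succeeds:
 *
 *	if (sn_pci_set_vchan(pdev, &dma_addr, 1) < 0)
 *		return -EINVAL;
 */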

#endif	/* _ASM_SN_IO_H */