/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999 by Ralf Baechle
 * Modified further for R[236]000 by Paul M. Antoine, 1996
 * Copyright (C) 1999 Silicon Graphics
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

/*
 * Enable interrupts: the ori/xori pair sets IE (bit 0) and clears
 * EXL, ERL and KSU (bits 1..4) of c0_status in one go.
 */
__asm__ (
	".macro\t__sti\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,0x1f\n\t"
	"xori\t$1,0x1e\n\t"
	"mtc0\t$1,$12\n\t"
	".set\tpop\n\t"
	".endm");

static __inline__ void
__sti(void)
{
	__asm__ __volatile__(
		"__sti"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value has
 * actually arrived in the status register before the end of this macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 * The ori/xori pair below clears only the IE bit, leaving the rest of
 * c0_status untouched.
 */
__asm__ (
	".macro\t__cli\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,1\n\t"
	"xori\t$1,1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1,$12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tpop\n\t"
	".endm");

static __inline__ void
__cli(void)
{
	__asm__ __volatile__(
		"__cli"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__ (
	".macro\t__save_flags flags\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	"mfc0\t\\flags, $12\n\t"
	".set\tpop\n\t"
	".endm");

#define __save_flags(x)						\
__asm__ __volatile__(						\
	"__save_flags %0"					\
	: "=r" (x))

__asm__ (
	".macro\t__save_and_cli result\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t\\result, $12\n\t"
	"ori\t$1, \\result, 1\n\t"
	"xori\t$1, 1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1, $12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tpop\n\t"
	".endm");

#define __save_and_cli(x)					\
__asm__ __volatile__(						\
	"__save_and_cli\t%0"					\
	: "=r" (x)						\
	: /* no inputs */					\
	: "memory")

__asm__ (
	".macro\t__save_and_sti result\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t\\result, $12\n\t"
	"ori\t$1, \\result, 1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1, $12\n\t"
	".set\tpop\n\t"
	".endm");

#define __save_and_sti(x)					\
__asm__ __volatile__(						\
	"__save_and_sti\t%0"					\
	: "=r" (x)						\
	: /* no inputs */					\
	: "memory")

__asm__(".macro\t__restore_flags flags\n\t"
	".set\tnoreorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1, $12\n\t"
	"andi\t\\flags, 1\n\t"
	"ori\t$1, 1\n\t"
	"xori\t$1, 1\n\t"
	"or\t\\flags, $1\n\t"
	"mtc0\t\\flags, $12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tat\n\t"
	".set\treorder\n\t"
	".endm");

#define __restore_flags(flags)					\
do {								\
	unsigned long __tmp1;					\
								\
	__asm__ __volatile__(					\
		"__restore_flags\t%0"				\
		: "=r" (__tmp1)					\
		: "0" (flags)					\
		: "memory");					\
} while(0)
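
/*
 * Note: __restore_flags restores only the IE bit of the saved word; all
 * other status bits are taken from the live c0_status.  A minimal usage
 * sketch (hypothetical caller, not part of this header):
 *
 *	unsigned long flags;
 *
 *	__save_flags(flags);
 *	__cli();
 *	... code that must not be interrupted ...
 *	__restore_flags(flags);
 */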

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(x)		((x) = __global_save_flags())
#define restore_flags(x)	__global_restore_flags(x)
#define save_and_cli(x)		do { save_flags(x); cli(); } while(0)
#define save_and_sti(x)		do { save_flags(x); sti(); } while(0)

#else

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)
#define save_and_sti(x)		__save_and_sti(x)

#endif /* CONFIG_SMP */

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_set(x)	__save_and_sti(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
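
/*
 * Typical interrupt-safe critical section built on the wrappers above
 * (hypothetical driver code, not part of this header):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... update data shared with an interrupt handler ...
 *	local_irq_restore(flags);
 */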

#define __sync()						\
	__asm__ __volatile__(					\
		".set push\n\t"					\
		".set noreorder\n\t"				\
		"sync\n\t"					\
		".set pop"					\
		: /* no output */				\
		: /* no input */				\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
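
/*
 * fast_iob(): after the sync, a dummy load from uncached KSEG1 space
 * forces any posted writes out to the system bus before we continue.
 */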
#define fast_iob()						\
	do {							\
		__sync();					\
		__asm__ __volatile__(				\
			".set push\n\t"				\
			".set noreorder\n\t"			\
			"lw $0,%0\n\t"				\
			"nop\n\t"				\
			".set pop"				\
			: /* no output */			\
			: "m" (*(int *)KSEG1)			\
			: "memory");				\
	} while (0)

#define wmb()	fast_wmb()
#define rmb()	fast_rmb()
#define mb()	fast_mb()
#define iob()	fast_iob()

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
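
/*
 * Barrier usage sketch (hypothetical producer/consumer, not part of this
 * header): the producer must make the data visible before the flag that
 * announces it, and the consumer must read the flag before the data.
 *
 *	buf->data = v;			producer
 *	wmb();
 *	buf->ready = 1;
 *
 *	while (!buf->ready)		consumer
 *		;
 *	rmb();
 *	use(buf->data);
 */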

/*
 * switch_to(prev, next, last) switches from task 'prev' to task 'next'.
 * resume() returns the task that was running before us, and the macro
 * stores it in 'last' so the scheduler knows whom we switched away from.
 * Callers must not pass the current task as 'next'.
 */
extern asmlinkage void *resume(void *last, void *next);

#define prepare_to_switch()	do { } while(0)

struct task_struct;

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next); \
} while(0)
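
/*
 * Sketch of the intended call pattern (hypothetical scheduler code, not
 * part of this header; pick_next_task() is an invented placeholder):
 *
 *	struct task_struct *prev = current, *next, *last;
 *
 *	next = pick_next_task();
 *	switch_to(prev, next, last);
 *	... when 'prev' is scheduled again, execution resumes here and
 *	... 'last' holds the task that was running just before us.
 */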

/* 32-bit atomic exchange via ll/sc; the trailing sync orders the store. */
static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u32\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"ll\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"sc\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" ll\t%0, %3\n\t"
		"sync\n\t"
		".set\tpop"
		: "=&r" (val), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");

	return val;
}

/* 64-bit exchange; lld/scd require a naturally aligned doubleword. */
static __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u64\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"lld\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"scd\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" lld\t%0, %3\n\t"
		"sync\n\t"
		".set\tpop"
		: "=&r" (val), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");

	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
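
/*
 * Example (hypothetical, not part of this header): a crude test-and-set
 * spin built directly on tas(); real code should use the spinlock API.
 * No release barrier is shown; this is a sketch only.
 *
 *	static volatile int lock;
 *
 *	while (tas(&lock))		... spin until we stored the 1
 *		while (lock)
 *			;
 *	... critical section ...
 *	lock = 0;
 */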

static inline unsigned long __xchg(unsigned long x, volatile void * ptr,
	int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	case 8:
		return xchg_u64(ptr, x);
	}

	/* Unsupported sizes are returned unchanged. */
	return x;
}

extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs) \
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs) \
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
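
/*
 * Typical use (hypothetical): die_if_kernel("unexpected exception", regs)
 * fires only when the trapped context was running in kernel mode.
 */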

#endif /* _ASM_SYSTEM_H */