/* $Id: system.h,v 1.86 2001/10/30 04:57:10 davem Exp $ */
#include <linux/config.h>

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/segment.h>

#ifdef __KERNEL__
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>

#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
  sun4        = 0x00,
  sun4c       = 0x01,
  sun4m       = 0x02,
  sun4d       = 0x03,
  sun4e       = 0x04,
  sun4u       = 0x05, /* V8 ploos ploos */
  sun_unknown = 0x06,
  ap1000      = 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
#define ARCH_SUN4 0
#else
#define ARCH_SUN4C_SUN4 1
#define ARCH_SUN4 1
#endif
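
/*
 * Minimal usage sketch (illustrative only, not part of this header):
 * platform code usually just compares sparc_cpu_model or tests the
 * ARCH_* helpers above at run time, e.g.
 *
 *	if (sparc_cpu_model == sun4m)
 *		printk("sun4m class machine\n");
 *	else if (ARCH_SUN4C_SUN4)
 *		printk("sun4c/sun4 class machine\n");
 */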

#define SUN4M_NCPUS            4              /* Architectural limit of sun4m. */

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

/* When a context switch happens we must flush all user windows so that
 * the register windows of the current process are written out onto its
 * stack. This way the windows are all clean for the next process and the
 * stack frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);
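
/*
 * Illustrative sketch (not code in this header): the flush matters when
 * the kernel is about to read or write the current task's register-window
 * save areas on its own user stack (signal setup, ptrace and similar):
 *
 *	flush_user_windows();		spill the live user windows to the stack
 *	... the reg_window frames on the user stack are now valid ...
 */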

#ifdef CONFIG_SMP
#define SWITCH_ENTER \
	if(prev->flags & PF_USEDFPU) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&prev->thread.float_regs[0], &prev->thread.fsr, \
		       &prev->thread.fpqueue[0], &prev->thread.fpqdepth); \
		prev->flags &= ~PF_USEDFPU; \
		prev->thread.kregs->psr &= ~PSR_EF; \
	}

#define SWITCH_DO_LAZY_FPU
#else
#define SWITCH_ENTER
#define SWITCH_DO_LAZY_FPU if(last_task_used_math != next) next->thread.kregs->psr&=~PSR_EF;
#endif

/*
 * Flush windows so that the VM switch which follows
 * will not pull the stack out from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 */
#define prepare_to_switch() do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)

	/* Much care has gone into this code, do not touch it.
	 *
	 * We need to load up regs l0/l1 for the newly forked child
	 * case because the trap return path relies on those registers
	 * holding certain values; gcc is told that they are clobbered.
	 * Gcc needs registers for 3 values in and 1 value out, so we
	 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
	 *
	 * Hey Dave, that "do not touch" sign is too much of an incentive
	 * - Anton
	 */
#define switch_to(prev, next, last) do {						\
	extern struct task_struct *current_set[NR_CPUS];				\
	SWITCH_ENTER									\
	SWITCH_DO_LAZY_FPU								\
	next->active_mm->cpu_vm_mask |= (1 << smp_processor_id());			\
	__asm__ __volatile__(								\
	"sethi	%%hi(here - 0x8), %%o7\n\t"						\
	"mov	%%g6, %%g3\n\t"								\
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t"					\
	"rd	%%psr, %%g4\n\t"							\
	"std	%%sp, [%%g6 + %4]\n\t"							\
	"rd	%%wim, %%g5\n\t"							\
	"wr	%%g4, 0x20, %%psr\n\t"							\
	"nop\n\t"									\
	"std	%%g4, [%%g6 + %3]\n\t"							\
	"ldd	[%2 + %3], %%g4\n\t"							\
	"mov	%2, %%g6\n\t"								\
	".globl	patchme_store_new_current\n"						\
"patchme_store_new_current:\n\t"							\
	"st	%2, [%1]\n\t"								\
	"wr	%%g4, 0x20, %%psr\n\t"							\
	"nop\n\t"									\
	"nop\n\t"									\
	"nop\n\t"	/* LEON needs this: load to %sp depends on CWP. */		\
	"ldd	[%%g6 + %4], %%sp\n\t"							\
	"wr	%%g5, 0x0, %%wim\n\t"							\
	"ldd	[%%sp + 0x00], %%l0\n\t"						\
	"ldd	[%%sp + 0x38], %%i6\n\t"						\
	"wr	%%g4, 0x0, %%psr\n\t"							\
	"nop\n\t"									\
	"nop\n\t"									\
	"jmpl	%%o7 + 0x8, %%g0\n\t"							\
	" mov	%%g3, %0\n"								\
	"here:\n"									\
        : "=&r" (last)									\
        : "r" (&(current_set[hard_smp_processor_id()])), "r" (next),			\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.kpsr)),		\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp))		\
	: "g1", "g2", "g3", "g4", "g5", "g7", "l0", "l1", "l3",				\
	"l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "o0", "o1", "o2",	\
	"o3", "o7");									\
	} while(0)
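
/*
 * How these macros fit together (an illustrative sketch of the generic
 * scheduler's calling sequence, not code from this file):
 *
 *	prepare_to_switch();		flush our register windows first
 *	... switch the mm if needed ...
 *	switch_to(prev, next, last);	save prev's kernel %sp/%psr, load
 *					next's, and resume running as next
 */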

/*
 * Changing the IRQ level on the Sparc.
 */
extern __inline__ void setipl(unsigned long __orig_psr)
{
	__asm__ __volatile__(
		"wr	%0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: /* no outputs */
		: "r" (__orig_psr)
		: "memory", "cc");
}

extern __inline__ void __cli(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"or	%0, %1, %0\n\t"
		"wr	%0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ void __sti(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %0\n\t"
		"wr	%0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd	%%psr, %0" : "=r" (retval));
	return retval;
}
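
/*
 * A sketch: getipl() returns the raw %psr, so the current interrupt level
 * can be inspected without changing it (PSR_PIL is the 4-bit PIL field
 * mask from <asm/psr.h>):
 *
 *	if ((getipl() & PSR_PIL) == PSR_PIL)
 *		;	all maskable interrupts are blocked, as after __cli()
 */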

extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
{
	unsigned long retval;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"and	%0, %2, %%g1\n\t"
		"and	%1, %2, %%g2\n\t"
		"xorcc	%%g1, %%g2, %%g0\n\t"
		"be	1f\n\t"
		" nop\n\t"
		"wr	%0, %2, %%psr\n\t"
		"nop; nop; nop;\n"
		"1:\n"
		: "=&r" (retval)
		: "r" (__new_psr), "i" (PSR_PIL)
		: "g1", "g2", "memory", "cc");

	return retval;
}

extern __inline__ unsigned long read_psr_and_cli(void)
{
	unsigned long retval;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"or	%0, %1, %%g1\n\t"
		"wr	%%g1, 0x0, %%psr\n\t"
		"nop; nop; nop\n\t"
		: "=r" (retval)
		: "i" (PSR_PIL)
		: "g1", "memory");

	return retval;
}

extern __inline__ unsigned long read_psr_and_sti(void)
{
	unsigned long retval;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %%g1\n\t"
		"wr	%%g1, 0x0, %%psr\n\t"
		"nop; nop; nop\n\t"
		: "=r" (retval)
		: "i" (PSR_PIL)
		: "g1", "memory");

	return retval;
}

#define __save_flags(flags)	((flags) = getipl())
#define __save_and_cli(flags)	((flags) = read_psr_and_cli())
#define __save_and_sti(flags)	((flags) = read_psr_and_sti())
#define __restore_flags(flags)	setipl((flags))
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_set(flags)		__save_and_sti(flags)
#define local_irq_restore(flags)	__restore_flags(flags)
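
/*
 * Typical (illustrative) use of the local flag helpers above: the usual
 * save/restore bracket around a short critical section.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		raise PIL to 15, remember old %psr
 *	... touch state shared with this CPU's interrupt handlers ...
 *	local_irq_restore(flags);	put the original interrupt level back
 */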

#ifdef CONFIG_SMP

extern unsigned char global_irq_holder;

#define save_and_cli(flags)   do { save_flags(flags); cli(); } while(0)

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags)=__global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#endif

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value) set_mb(__var, __value)
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")

#define nop() __asm__ __volatile__ ("nop")
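
/*
 * Barrier usage sketch (illustrative; the two variables are hypothetical).
 * Since the kernel is not run in PSO mode (see the XXX above) these all
 * reduce to compiler barriers, but the pairing below is still the portable
 * way to publish data before a ready flag:
 *
 *	shared_buf = value;		producer fills the payload first
 *	wmb();				order the store before the flag
 *	set_mb(data_ready, 1);		set the flag, then a full barrier
 */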

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	};
	__xchg_called_with_bad_pointer();
	return x;
}
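
/*
 * Illustrative sketch of xchg()/tas() (the lock word is hypothetical and
 * this is not the kernel's spinlock implementation, only what the
 * primitive is good for):
 *
 *	static volatile unsigned long busy;	0 = free, 1 = held
 *
 *	while (tas(&busy))			atomically store 1, get old value
 *		while (busy)
 *			;			spin until it looks free again
 *	... critical section ...
 *	busy = 0;				release
 */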

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* !(__SPARC_SYSTEM_H) */