#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>
#include <asm/smp.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>

static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
	sparc_leon	= 0x08, /* Leon SoC */
};
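
/*
 * Illustrative sketch (not part of the original header): boot code
 * probes the machine type and records it in sparc_cpu_model (declared
 * below), so platform code can branch on it. setup_sun4m() and
 * setup_leon() here are hypothetical helpers:
 *
 *	switch (sparc_cpu_model) {
 *	case sun4m:
 *		setup_sun4m();
 *		break;
 *	case sparc_leon:
 *		setup_leon();
 *		break;
 *	default:
 *		break;
 *	}
 */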

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#define ARCH_SUN4C (sparc_cpu_model == sun4c)

#define SUN4M_NCPUS	4	/* Architectural limit of sun4m. */

extern char reboot_command[];

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;
extern int scons_pwroff;

static inline int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
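/* fpsave() dumps the current FPU state: the %f register file into
 * fpregs, the %fsr into fsr, and any queued FP instructions into
 * fpqueue with the entry count left in fpqdepth. (Descriptive note;
 * the lone caller in this file is SWITCH_ENTER() below.)
 */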
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
	do { \
		if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
			put_psr(get_psr() | PSR_EF); \
			fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
			       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
			clear_tsk_thread_flag(prv, TIF_USEDFPU); \
			(prv)->thread.kregs->psr &= ~PSR_EF; \
		} \
	} while(0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)	/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do { \
		if (last_task_used_math != (nxt)) \
			(nxt)->thread.kregs->psr &= ~PSR_EF; \
	} while(0)
#endif
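
/*
 * Descriptive note: on UP the FPU is switched lazily; SWITCH_DO_LAZY_FPU()
 * only clears PSR_EF for the incoming task when it was not the last FPU
 * user, deferring the real save/restore to the resulting fp-disabled
 * trap. On SMP the state is saved eagerly in SWITCH_ENTER(), since the
 * task may next run on a different CPU.
 */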

extern void flushw_all(void);

/*
 * Flush windows so that the VM switch which follows
 * would not pull the stack from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 * XXX WTF is the above comment? Found in late teen 2.4.x.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
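
/*
 * Descriptive note: the seven nested saves force window-overflow traps
 * until every user window has been spilled to the stack, and the seven
 * restores unwind back to the starting window (seven is enough for the
 * usual 8-window implementations). The global flush_patch_switch label
 * presumably exists so boot code can patch this sequence for the CPU's
 * actual window count.
 */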

/* Much care has gone into this code, do not touch it.
 *
 * We need to load up regs l0/l1 for the newly forked child
 * case because the trap return path relies on those registers
 * holding certain values; gcc is told that they are clobbered.
 * Gcc needs registers for 3 values in and 1 value out, so we
 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
 *
 * Hey Dave, that "do not touch" sign is too much of an incentive
 * - Anton & Pete
 */
#define switch_to(prev, next, last) do { \
	SWITCH_ENTER(prev); \
	SWITCH_DO_LAZY_FPU(next); \
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
	__asm__ __volatile__( \
	"sethi	%%hi(here - 0x8), %%o7\n\t" \
	"mov	%%g6, %%g3\n\t" \
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t" \
	"rd	%%psr, %%g4\n\t" \
	"std	%%sp, [%%g6 + %4]\n\t" \
	"rd	%%wim, %%g5\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std	%%g4, [%%g6 + %3]\n\t" \
	"ldd	[%2 + %3], %%g4\n\t" \
	"mov	%2, %%g6\n\t" \
	".globl	patchme_store_new_current\n" \
	"patchme_store_new_current:\n\t" \
	"st	%2, [%1]\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd	[%%g6 + %4], %%sp\n\t" \
	"wr	%%g5, 0x0, %%wim\n\t" \
	"ldd	[%%sp + 0x00], %%l0\n\t" \
	"ldd	[%%sp + 0x38], %%i6\n\t" \
	"wr	%%g4, 0x0, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl	%%o7 + 0x8, %%g0\n\t" \
	" ld	[%%g3 + %5], %0\n\t" \
	"here:\n" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), \
	  "r" (task_thread_info(next)), \
	  "i" (TI_KPSR), \
	  "i" (TI_KSP), \
	  "i" (TI_TASK) \
	: "g1", "g2", "g3", "g4", "g5", "g7", \
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o7"); \
	} while(0)
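
/*
 * Descriptive note: "mov %%g6, %%g3" stashes the outgoing thread_info
 * pointer before %g6 is repointed at the new one, and the delay-slot
 * load "ld [%%g3 + %5], %0" (with %5 = TI_TASK) then fetches its task
 * pointer into "last", so the caller sees which task ran before the
 * switch.
 */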

/* XXX Change this if we ever use a PSO mode kernel. */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value)	do { __var = __value; mb(); } while(0)
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
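
/*
 * These expand to pure compiler barriers because the kernel runs the CPU
 * in TSO mode, where stores are already ordered. A hedged usage sketch
 * (data and flag are hypothetical variables, not part of this header):
 *
 *	writer:			reader:
 *	data = 42;		while (!flag)
 *	smp_wmb();			;
 *	flag = 1;		smp_rmb();
 *				BUG_ON(data != 42);
 */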

#define nop() __asm__ __volatile__ ("nop")

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
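
/*
 * Hedged usage sketch (my_lock is a hypothetical variable, not part of
 * this header): since SWAP is the only atomic primitive here, xchg()
 * can serve as a crude test-and-set lock:
 *
 *	static volatile unsigned long my_lock;
 *
 *	while (xchg(&my_lock, 1UL))	(spin until we read back 0)
 *		;
 *	... critical section ...
 *	my_lock = 0;
 */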

/* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
 * of spinlocks to get a bit of performance...
 *
 * See arch/sparc/lib/atomic32.c for implementation.
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
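
/*
 * Hedged sketch of that hashed-spinlock scheme (names here are
 * assumptions; see arch/sparc/lib/atomic32.c for the real code): hash
 * the object address into a small lock array and do the compare-and-
 * store under the chosen lock:
 *
 *	spinlock_t hashed_locks[HASH_SIZE];
 *	#define HASHED_LOCK(p) \
 *		(&hashed_locks[(((unsigned long)(p)) >> 8) % HASH_SIZE])
 *
 *	spin_lock_irqsave(HASHED_LOCK(m), flags);
 *	if ((prev = *m) == old)
 *		*m = new_;
 *	spin_unlock_irqrestore(HASHED_LOCK(m), flags);
 *	return prev;
 */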
#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* don't worry... the optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})
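
/*
 * Hedged usage sketch (my_counter is a hypothetical u32, not part of
 * this header): the classic read-modify-write retry loop on cmpxchg():
 *
 *	u32 old, new_;
 *	do {
 *		old = my_counter;
 *		new_ = old + 1;
 *	} while (cmpxchg(&my_counter, old, new_) != old);
 */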

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */