#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0

extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern const char *sparc_pmu_type;

extern char reboot_command[];

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     " membar " type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
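
/* Illustrative sketch (not part of the original header): the one ordering
 * TSO does not give for free is store-then-load, which is why mb() expands
 * to a real #StoreLoad membar while rmb()/wmb() are compiler-only barriers.
 * The function and variable names below are hypothetical and exist purely
 * to show the Dekker-style pattern that needs mb().
 */
static inline int example_dekker_try_enter(volatile int *my_flag,
					   volatile int *other_flag)
{
	*my_flag = 1;			/* store: announce our intent         */
	mb();				/* #StoreLoad: make the store visible */
					/* before the load below              */
	return *other_flag == 0;	/* load: did the peer also enter?     */
}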

#endif

#define nop() 		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define write_pic(__p)						\
	__asm__ __volatile__("ba,pt %%xcc, 99f\n\t"		\
			     " nop\n\t"				\
			     ".align 64\n"			\
			     "99:wr %0, 0x0, %%pic\n\t"		\
			     "rd %%pic, %%g0" : : "r" (__p))
#define reset_pic()	write_pic(0)

#ifndef __ASSEMBLY__

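/* Illustrative sketch (not part of the original header): sampling the %pic
 * counter pair with the accessors defined above.  The helper name
 * example_sample_pic() is hypothetical, and the note about PIC0/PIC1 packing
 * reflects the usual UltraSPARC layout rather than anything this header
 * guarantees.
 */
static inline unsigned long example_sample_pic(void)
{
	unsigned long counts;

	reset_pic();		/* zero the counters via the Blackbird-safe write */
	/* ... the code being measured would run here ... */
	read_pic(counts);	/* PIC0 in the low 32 bits, PIC1 in the high 32 */
	return counts;
}
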
extern void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)		\
do {						\
	flushw_all();				\
} while (0)

/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 */
#define switch_to(prev, next, last)					\
do {	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	trap_block[current_thread_info()->cpu].thread =			\
		task_thread_info(next);					\
	__asm__ __volatile__(						\
	"mov %%g4, %%g7\n\t"						\
	"stx %%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx %%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr %%wstate, %%o5\n\t"					\
	"stx %%o6, [%%g6 + %6]\n\t"					\
	"stb %%o5, [%%g6 + %5]\n\t"					\
	"rdpr %%cwp, %%o5\n\t"						\
	"stb %%o5, [%%g6 + %8]\n\t"					\
	"wrpr %%g0, 15, %%pil\n\t"					\
	"mov %4, %%g6\n\t"						\
	"ldub [%4 + %8], %%g1\n\t"					\
	"wrpr %%g1, %%cwp\n\t"						\
	"ldx [%%g6 + %6], %%o6\n\t"					\
	"ldub [%%g6 + %5], %%o5\n\t"					\
	"ldub [%%g6 + %7], %%o7\n\t"					\
	"wrpr %%o5, 0x0, %%wstate\n\t"					\
	"ldx [%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx [%%sp + 2047 + 0x78], %%i7\n\t"				\
	"ldx [%%g6 + %9], %%g4\n\t"					\
	"wrpr %%g0, 14, %%pil\n\t"					\
	"brz,pt %%o7, switch_to_pc\n\t"					\
	" mov %%g7, %0\n\t"						\
	"sethi %%hi(ret_from_syscall), %%g1\n\t"			\
	"jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t"			\
	" nop\n\t"							\
	".globl switch_to_pc\n\t"					\
	"switch_to_pc:\n\t"						\
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg),	\
	  "=r" (__local_per_cpu_offset)					\
	: "0" (task_thread_info(next)),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
	  "g1", "g2", "g3", "g7",					\
	  "l1", "l2", "l3", "l4", "l5", "l6", "l7",			\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7");			\
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
	"	mov %0, %1\n"
	"1:	lduw [%4], %2\n"
	"	cas [%4], %2, %0\n"
	"	cmp %2, %0\n"
	"	bne,a,pn %%icc, 1b\n"
	"	 mov %1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
	"	mov %0, %1\n"
	"1:	ldx [%4], %2\n"
	"	casx [%4], %2, %0\n"
	"	cmp %2, %0\n"
	"	bne,a,pn %%xcc, 1b\n"
	"	 mov %1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void *ptr,
				   int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
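
/* Illustrative sketch (not part of the original header): xchg() used as an
 * atomic test-and-set.  example_trylock() and the lock variable are
 * hypothetical names used only to show the calling convention; real users
 * should reach for the generic spinlock/bitops APIs instead.
 */
static inline int example_trylock(unsigned int *lock)
{
	/* Atomically store 1 and fetch the old value; 0 means we got it. */
	return xchg(lock, 1) == 0;
}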

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_,		\
				       sizeof(*(ptr)));			\
})
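
/* Illustrative sketch (not part of the original header): the usual retry
 * loop built on cmpxchg().  Success is detected exactly as the comment
 * above describes, by comparing the returned value with the expected one.
 * example_atomic_add() is a hypothetical helper name.
 */
static inline void example_atomic_add(volatile int *counter, int delta)
{
	int old, seen;

	do {
		old = *counter;		/* snapshot the current value     */
		seen = cmpxchg(counter, old, old + delta);
	} while (seen != old);		/* somebody raced us; try again   */
}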

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
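
/* Illustrative sketch (not part of the original header): cmpxchg_local()
 * used for a counter that is only ever touched by the local CPU, e.g. a
 * per-cpu statistic updated from both process and interrupt context.
 * example_local_inc() is a hypothetical helper name.
 */
static inline void example_local_inc(unsigned long *stat)
{
	unsigned long old;

	do {
		old = *stat;
	} while (cmpxchg_local(stat, old, old + 1) != old);
}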

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */