/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another.  sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only.  We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
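
/*
 * Illustrative sketch, not part of this header: the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The variables
 * 'data' and 'ready' are hypothetical.
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 *
 * The write barrier orders the store to 'data' before the store to
 * 'ready'; the read barrier orders the load of 'ready' before the load
 * of 'data'.  Without both barriers, CPU 1 could observe ready == 1
 * while still seeing a stale 'data'.
 */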

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 *
 * Note: no trailing semicolon, so data_barrier(x); expands to a single
 * statement and behaves correctly in an unbraced if/else.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory")
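
/*
 * Usage sketch (the device register layout is hypothetical): make sure
 * a status load has completed before subsequent code runs, without
 * paying for a full sync:
 *
 *	unsigned int status = in_be32(&regs->status);
 *	data_barrier(status);
 *	// nothing past this point starts until 'status' is known
 *
 * twi 0,rX,0 never actually traps, but it cannot complete until the
 * value in rX is available; the following isync keeps later
 * instructions from starting until the twi has executed.
 */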

struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}
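
/*
 * For reference, DEBUGGER_BOILERPLATE(debugger) expands to:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 *
 * i.e. each wrapper calls through its hook pointer when a debugger has
 * installed one, and is a cheap no-op otherwise.
 */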

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
			 unsigned long error_code, int signal_code, int brkpt);
#else
extern void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code);
#endif
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs *regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);

#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
#else
static inline void discard_lazy_cpu_state(void)
{
}
#endif

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_VSX
extern void flush_vsx_to_thread(struct task_struct *);
#else
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))
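
/*
 * Simplified sketch of how the scheduler core uses this macro (the
 * surrounding code is paraphrased, not the actual context_switch()):
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// When 'prev' is eventually scheduled back in, execution
 *	// resumes here; 'last' then names the task that was running
 *	// immediately before this one resumed, which need not be
 *	// 'next'.
 */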

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;
extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern int init_bootmem_done;	/* set once bootmem is available */
extern phys_addr_t memory_limit;
extern unsigned long klimit;

extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange, local CPU variant: the same as __xchg_u32() above
 * but without the acquire/release barriers, so the exchange is atomic
 * only with respect to the local processor.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })
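
/*
 * Usage sketch (the lock word is hypothetical): xchg() is fully
 * barriered, so it can implement a simple test-and-set lock:
 *
 *	static unsigned int lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		cpu_relax();		// someone else holds the lock
 *	// ... critical section ...
 *	smp_mb();			// order the section before the release
 *	lock_word = 0;
 *
 * xchg_local() is the same operation without the barriers, for data
 * only ever touched by the local CPU.
 */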

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })


#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
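
/*
 * Usage sketch (the counter is hypothetical): the canonical cmpxchg()
 * retry loop for a lock-free read-modify-write:
 *
 *	static unsigned long counter;
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The store only happens if *ptr still equals 'old'; a concurrent
 * update simply forces another trip around the loop.
 */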

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

extern unsigned long arch_align_stack(unsigned long sp);

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */