/*
 *  linux/include/asm-arm/proc-armv/system.h
 *
 *  Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>

#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr	p15, 0, %0, c1, c0	@ set CR"	\
	: : "r" (x))

#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base()	((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base()	(0)
#endif
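
/*
 * Illustrative sketch, not part of the original header: a caller would
 * typically take a copy of the control register value (assumed here to be
 * mirrored by cr_alignment, which entry-armv.S maintains), OR in the CR_*
 * bits it wants, and write the result back with set_cr().  The function
 * name example_enable_icache is hypothetical.
 */
static inline void example_enable_icache(void)
{
	unsigned long cr = cr_alignment;	/* assumed cached CP15 c1 value */

	cr |= CR_I;				/* request Icache enable */
	set_cr(cr);				/* write CP15 register 1 */
}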

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

/*
 * Save the current interrupt enable state & enable IRQs
 */
#define local_irq_set(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_set\n"	\
"	bic	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	: "=r" (x), "=r" (temp)					\
	:							\
	: "memory");						\
	})

/*
 * Enable IRQs
 */
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable IRQs
 */
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Enable FIQs
 */
#define __stf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Disable FIQs
 */
#define __clf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	: "=r" (temp)						\
	:							\
	: "memory");						\
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags\n"	\
	  : "=r" (x)						\
	  :							\
	  : "memory");						\
	})

/*
 * Restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	:							\
	: "r" (x)						\
	: "memory")
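
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * is to bracket a short critical section with local_irq_save() and
 * local_irq_restore(), so the previous IRQ mask is preserved even if the
 * caller was already running with IRQs disabled.  example_counter and
 * example_atomic_increment are hypothetical names.
 */
extern volatile unsigned long example_counter;

static inline void example_atomic_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* save CPSR and mask IRQs */
	example_counter++;		/* cannot be interrupted by an IRQ handler */
	local_irq_restore(flags);	/* restore the saved CPSR control field */
}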

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it is the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
		case 1:
			local_irq_save(flags);
			ret = *(volatile unsigned char *)ptr;
			*(volatile unsigned char *)ptr = x;
			local_irq_restore(flags);
			break;

		case 4:
			local_irq_save(flags);
			ret = *(volatile unsigned long *)ptr;
			*(volatile unsigned long *)ptr = x;
			local_irq_restore(flags);
			break;
#else
		case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
		case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory");
			break;
#endif
		default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
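
/*
 * Illustrative sketch, not part of the original header: callers normally
 * reach __xchg() through an xchg(ptr, x) wrapper macro defined elsewhere
 * in the asm-arm headers, which supplies sizeof(*(ptr)) as the size
 * argument.  example_test_and_set and its lock parameter are hypothetical
 * and only show the calling convention.
 */
static inline unsigned long example_test_and_set(volatile unsigned long *lock)
{
	/* atomically store 1 into *lock and return the previous value */
	return __xchg(1, (volatile void *)lock, sizeof(*lock));
}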

#endif