/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <asm/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <asm/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
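
/*
 * Illustrative sketch (not part of this API): atomic_cmpxchg() is the
 * usual building block for compare-and-swap loops.  For example, a
 * hypothetical atomic_max() that raises @v to at least @n, retrying
 * whenever another cpu changes the word underneath it:
 *
 *	static inline void atomic_max(atomic_t *v, int n)
 *	{
 *		int old = atomic_read(v);
 *		while (old < n) {
 *			int seen = atomic_cmpxchg(v, old, n);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */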

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}
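
/*
 * A common use of atomic_add_unless() (a sketch; "refcnt" is a
 * hypothetical field, not something defined here) is taking a
 * reference only while the object is still live:
 *
 *	if (atomic_add_unless(&obj->refcnt, 1, 0))
 *		... got a reference; safe to use obj ...
 *	else
 *		... refcount already hit zero; do not touch obj ...
 */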

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}
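
/*
 * Concretely (a hypothetical interleaving): if cpu 0 did a plain store
 * to v->counter while cpu 1 was between the load and the store of an
 * in-progress _atomic_xchg_add(), cpu 1's store-back would overwrite
 * cpu 0's value and the set would be lost.  Going through
 * _atomic_xchg() makes the set take the same per-word lock as the
 * other atomic operations.
 */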

#define xchg(ptr, x) ((typeof(*(ptr))) \
  ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
   atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
   __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
  ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
   atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
   __cmpxchg_called_with_bad_pointer()))
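
/*
 * Illustrative use of the generic forms (a sketch, not part of this
 * file): only 4-byte operands are supported here; any other operand
 * size fails at build time via __xchg_called_with_bad_pointer().
 *
 *	int flag = 0;
 *	int was = xchg(&flag, 1);	 returns the previous value, 0
 *	int old = cmpxchg(&flag, 1, 2);  writes 2 only if flag was 1
 */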

/* A 64-bit atomic type */

typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
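
/*
 * Together these support the usual refcounting idiom, e.g. (a sketch;
 * "ref" is a hypothetical field):
 *
 *	if (atomic64_inc_not_zero(&obj->ref))
 *		... got a reference; obj cannot be freed under us ...
 *
 *	if (atomic64_dec_and_test(&obj->ref))
 *		... we dropped the last reference; free obj ...
 */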

/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then do the read/modify/write of the
 * word.  But after the word is updated, the routine issues an "mf"
 * before returning, and since it's a function call, we don't even need
 * a compiler barrier.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
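
/*
 * E.g. (a sketch; the fields are hypothetical): ordering a prior store
 * before a decrement needs the explicit barrier, while nothing extra
 * is needed after the decrement on this platform:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->count);
 */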

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
  (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
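
/*
 * Conceptually (a sketch; the real hash lives in the C support code
 * and may differ): the address of the atomic word selects its lock,
 * e.g. in the atomic_locks[] configuration something like
 *
 *	int *lock = &atomic_locks[(addr >> 3) & (ATOMIC_HASH_SIZE - 1)];
 *
 * so that all operations on a given word always contend for the same
 * lock, while unrelated words usually do not.
 */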

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */