#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <asm/system.h>

/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */
#ifdef CONFIG_SMP
#include <asm/spinlock_t.h>

/* Use an array of spinlocks for our atomic_ts.
** Hash function to index into a different SPINLOCK.
** Since "a" is usually an address, ">>8" gives one spinlock per 256-byte block.
*/
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])

extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
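
/* For example, an atomic_t at address 0x1234 hashes to index
 * (0x1234 >> 8) & 3 == 2, so counters that live in different 256-byte
 * blocks usually land on different locks and don't contend.
 */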

/* copied from <asm/spinlock.h> and modified.
 * No CONFIG_DEBUG_SPINLOCK support.
 *
 * XXX REVISIT these could be renamed and moved to spinlock_t.h as well
 */
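/* PA-RISC's ldcw ("load and clear word") atomically returns the old value
 * of the lock word and stores zero to it.  The lock word is 1 when free,
 * so a returned 0 means another CPU holds the lock and we keep spinning
 * until SPIN_UNLOCK() writes the 1 back.
 */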
#define SPIN_LOCK(x) do { while(__ldcw(&(x)->lock) == 0); } while(0)
#define SPIN_UNLOCK(x) do { (x)->lock = 1; } while(0)

#else /* CONFIG_SMP */

#define ATOMIC_HASH_SIZE 1
#define ATOMIC_HASH(a) (0)

#define SPIN_LOCK(x) (void)(x)
#define SPIN_UNLOCK(x) do { } while(0)

#endif /* CONFIG_SMP */

/* copied from <linux/spinlock.h> and modified */
#define SPIN_LOCK_IRQSAVE(lock, flags) do { \
	local_irq_save(flags); SPIN_LOCK(lock); \
} while (0)

#define SPIN_UNLOCK_IRQRESTORE(lock, flags) do { \
	SPIN_UNLOCK(lock); local_irq_restore(flags); \
} while (0)

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct {
	volatile int counter;
} atomic_t;


/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could 64-bit code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
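
/* Usage sketch (not part of the original interface; the helper name below
 * is made up): xchg() returns the previous value, so a slot can be
 * atomically emptied and its old contents claimed by exactly one caller.
 */
static __inline__ unsigned long __xchg_take_example(unsigned long *slot)
{
	/* Swap 0 into *slot and hand back whatever value was there. */
	return xchg(slot, 0UL);
}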


#define __HAVE_ARCH_CMPXCHG 1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
  ({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })
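
/* Usage sketch (illustrative only; the helper name below is made up):
 * the usual cmpxchg() retry loop, here raising *p to at least val.
 * If another CPU modifies *p between the read and the swap, cmpxchg()
 * returns the unexpected old value and we simply try again.
 */
static __inline__ void __cmpxchg_max_example(volatile unsigned int *p,
					     unsigned int val)
{
	unsigned int old;

	do {
		old = *p;
		if (old >= val)
			return;		/* already large enough */
	} while (cmpxchg(p, old, val) != old);
}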


/* It's possible to reduce all atomic operations to
 * __atomic_add_return, __atomic_set and __atomic_read (the latter
 * is there only for consistency). */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);

	ret = (v->counter += i);

	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
	return ret;
}

static __inline__ void __atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);

	v->counter = i;

	SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
}

static __inline__ int __atomic_read(atomic_t *v)
{
	return v->counter;
}

/* exported interface */

#define atomic_add(i,v)		((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)		((void)(__atomic_add_return(-(i),(v))))
#define atomic_inc(v)		((void)(__atomic_add_return(  1,(v))))
#define atomic_dec(v)		((void)(__atomic_add_return( -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(  1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return( -1,(v)))

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_set(v,i)		(__atomic_set((v),i))
#define atomic_read(v)		(__atomic_read(v))

#define ATOMIC_INIT(i)	{ (i) }
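
/* Usage sketch of the exported interface (the type and function names here
 * are made-up examples, not part of this header): a simple reference count
 * where the last dropper is responsible for freeing the object.
 */
struct example_ref {
	atomic_t count;		/* initialise with ATOMIC_INIT(1) */
};

static __inline__ void example_ref_get(struct example_ref *r)
{
	atomic_inc(&r->count);
}

/* Returns 1 when the caller dropped the final reference. */
static __inline__ int example_ref_put(struct example_ref *r)
{
	return atomic_dec_and_test(&r->count);
}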

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PARISC_ATOMIC_H_ */