1 #ifndef _ALPHA_RWSEM_H
2 #define _ALPHA_RWSEM_H
3
4 /*
5 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
6 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
7 */
8
9 #ifndef _LINUX_RWSEM_H
10 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
11 #endif
12
13 #ifdef __KERNEL__
14
15 #include <linux/compiler.h>
16
/*
 * Layout of the 64-bit sem->count:
 *  - low 32 bits: number of active lockers (each reader adds
 *    ACTIVE_BIAS; a writer adds ACTIVE_WRITE_BIAS = WAITING_BIAS +
 *    ACTIVE_BIAS, so it also marks itself active);
 *  - high 32 bits: go negative (by WAITING_BIAS) while tasks are
 *    queued waiting, so "count < 0" means contention.
 * A completely free semaphore is exactly 0.
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
23
/*
 * Acquire the semaphore for reading.
 *
 * Atomically adds RWSEM_ACTIVE_READ_BIAS to sem->count.  If the count
 * was negative beforehand (a writer is active or tasks are waiting),
 * take the slow path via rwsem_down_read_failed().
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: no cross-CPU races, a plain read-modify-write is enough. */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	addq %0,%3,%2\n"	/* temp = old + READ_BIAS */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	"	mb\n"			/* barrier after acquiring the lock */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
47
48 /*
49 * trylock for reading -- returns 1 if successful, 0 if contention
50 */
__down_read_trylock(struct rw_semaphore * sem)51 static inline int __down_read_trylock(struct rw_semaphore *sem)
52 {
53 long old, new, res;
54
55 res = sem->count;
56 do {
57 new = res + RWSEM_ACTIVE_READ_BIAS;
58 if (new <= 0)
59 break;
60 old = res;
61 res = cmpxchg(&sem->count, old, new);
62 } while (res != old);
63 return res >= 0 ? 1 : 0;
64 }
65
/*
 * Acquire the semaphore for writing.
 *
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS to sem->count.  A writer can
 * only proceed if the old count was exactly 0 (no active lockers, no
 * waiters); any other old value routes to rwsem_down_write_failed().
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: no cross-CPU races, a plain read-modify-write is enough. */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	addq %0,%3,%2\n"	/* temp = old + WRITE_BIAS */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	"	mb\n"			/* barrier after acquiring the lock */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
89
90 /*
91 * trylock for writing -- returns 1 if successful, 0 if contention
92 */
__down_write_trylock(struct rw_semaphore * sem)93 static inline int __down_write_trylock(struct rw_semaphore *sem)
94 {
95 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
96 RWSEM_ACTIVE_WRITE_BIAS);
97 if (ret == RWSEM_UNLOCKED_VALUE)
98 return 1;
99 return 0;
100 }
101
/*
 * Release a read lock.
 *
 * Atomically subtracts RWSEM_ACTIVE_READ_BIAS from sem->count.  If the
 * old count was negative (waiters queued) and its low 32 bits show we
 * were the last active locker, wake the waiters.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"			/* barrier before releasing the lock */
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	subq %0,%3,%2\n"	/* temp = old - READ_BIAS */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		/* (int) keeps only the active-locker half of the count;
		 * zero after our decrement means no lockers remain. */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
126
/*
 * Release the write lock.
 *
 * Atomically subtracts RWSEM_ACTIVE_WRITE_BIAS from sem->count.  Here
 * "count" holds the NEW value: nonzero with a zero active part (low 32
 * bits) means waiters are still queued, so wake them.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef	CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"			/* barrier before releasing the lock */
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	subq %0,%3,%2\n"	/* temp = old - WRITE_BIAS */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	"	subq %0,%3,%0\n"	/* on success, %0 = new count */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		/* new count nonzero but active part zero => only waiters left */
		if ((int)count == 0)
			rwsem_wake(sem);
}
152
/*
 * downgrade write lock to read lock
 *
 * Atomically adds -RWSEM_WAITING_BIAS, turning the writer's
 * WRITE_BIAS (= WAITING_BIAS + ACTIVE_BIAS) into a plain reader's
 * ACTIVE_BIAS.  If the old count was negative, other tasks are
 * queued; let rwsem_downgrade_wake() admit the waiting readers.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_WAITING_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	addq %0,%3,%2\n"	/* temp = old + (-WAITING_BIAS) */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	"	mb\n"			/* barrier after the transition */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
179
/*
 * Atomically add 'val' to sem->count without returning anything.
 *
 * NOTE(review): unlike the lock/unlock paths above, this asm has no mb
 * and no "memory" clobber -- it is a bare counter update; callers must
 * supply any ordering they need.  Confirm against its users.
 */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	addq %0,%2,%0\n"	/* temp = old + val */
	"	stq_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,2f\n"		/* retry (branch kept out of line) */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
198
/*
 * Atomically add 'val' to sem->count and return the NEW value.
 *
 * The sum is computed twice: %2 is the value store-conditionally
 * written back, %0 is the same sum kept in a register as the return
 * value (stq_c would clobber %2 with the success flag).
 *
 * NOTE(review): like rwsem_atomic_add(), no mb and no "memory"
 * clobber -- ordering is the caller's responsibility.
 */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"		/* load-locked the old count */
	"	addq %0,%3,%2\n"	/* temp = old + val (to be stored) */
	"	addq %0,%3,%0\n"	/* ret  = old + val (to be returned) */
	"	stq_c %2,%1\n"		/* store-conditional; %2 = 0 on failure */
	"	beq %2,2f\n"		/* retry (branch kept out of line) */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));

	return ret;
#endif
}
221
222 #endif /* __KERNEL__ */
223 #endif /* _ALPHA_RWSEM_H */
224