/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
 *
 * Written by David Howells (dhowells@redhat.com).
 *
 * Derived from asm-i386/semaphore.h
 *
 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
 *
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer takes the lock, it adds ACTIVE_WRITE_BIAS (i.e. subtracts
 * 0x0000ffff), leaving a count of 0xffff0001 in the uncontended case. Because
 * XADD returns the old value, an old value of 0 tells the writer it was
 * granted the lock outright.
 * Readers increment the count by 1 and see a positive result when
 * uncontended, or a negative result if a writer is active or queued (possibly
 * with readers queued behind it), in which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the sign flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
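
/*
 * Worked example of count values under the encoding above (illustrative
 * only, derived from the bias definitions below; not an exhaustive list):
 *
 *	0x00000000	unlocked, nobody waiting
 *	0x00000001	one reader holds the lock
 *	0x00000003	three readers hold the lock
 *	0xffff0001	one writer holds the lock (0 + ACTIVE_WRITE_BIAS)
 *	0xfffe0001	one writer holds the lock and one task is queued waiting
 */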

#ifndef _I386_RWSEM_H
#define _I386_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem));

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
	__RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
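
/*
 * Usage sketch (callers normally go through the linux/rwsem.h wrappers such
 * as down_read()/up_read() rather than the __ helpers below; the semaphore
 * name here is purely illustrative):
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);
 *	... read-side critical section ...
 *	up_read(&example_sem);
 */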

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	__asm__ __volatile__(
		"# beginning down_read\n\t"
LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001 */
		"  js        2f\n\t" /* jump if we weren't granted the lock */
		"1:\n\t"
		LOCK_SECTION_START("")
		"2:\n\t"
		"  pushl     %%ecx\n\t"
		"  pushl     %%edx\n\t"
		"  call      rwsem_down_read_failed\n\t"
		"  popl      %%edx\n\t"
		"  popl      %%ecx\n\t"
		"  jmp       1b\n"
		LOCK_SECTION_END
		"# ending down_read\n\t"
		: "=m"(sem->count)
		: "a"(sem), "m"(sem->count)
		: "memory", "cc");
}
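
/*
 * Illustrative C-level equivalent of the read fast path above (a sketch
 * only; the inline asm is the real implementation):
 *
 *	sem->count += RWSEM_ACTIVE_READ_BIAS;	// atomically, via LOCK INCL
 *	if ((signed long) sem->count < 0)	// MSW non-zero => writer active or queued
 *		rwsem_down_read_failed(sem);	// queue and sleep until granted the lock
 */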

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	__s32 result, tmp;
	__asm__ __volatile__(
		"# beginning __down_read_trylock\n\t"
		"  movl      %0,%1\n\t"
		"1:\n\t"
		"  movl	     %1,%2\n\t"
		"  addl      %3,%2\n\t"
		"  jle	     2f\n\t"
LOCK_PREFIX	"  cmpxchgl  %2,%0\n\t"
		"  jnz	     1b\n\t"
		"2:\n\t"
		"# ending __down_read_trylock\n\t"
		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
		: "i"(RWSEM_ACTIVE_READ_BIAS)
		: "memory", "cc");
	return result >= 0 ? 1 : 0;
}
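
/*
 * The cmpxchg loop above is roughly equivalent to this C (illustrative
 * sketch; cmpxchg() is the kernel's atomic compare-and-exchange, which
 * returns the value previously held in memory):
 *
 *	__s32 old, new;
 *	do {
 *		old = sem->count;
 *		new = old + RWSEM_ACTIVE_READ_BIAS;
 *		if (new <= 0)
 *			return 0;	// a writer is active or queued
 *	} while (cmpxchg(&sem->count, old, new) != old);
 *	return 1;
 */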

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		"# beginning down_write\n\t"
LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 0x0000ffff, returns the old value */
		"  testl     %%edx,%%edx\n\t" /* was the count 0 before? */
		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
		"1:\n\t"
		LOCK_SECTION_START("")
		"2:\n\t"
		"  pushl     %%ecx\n\t"
		"  call      rwsem_down_write_failed\n\t"
		"  popl      %%ecx\n\t"
		"  jmp       1b\n"
		LOCK_SECTION_END
		"# ending down_write"
		: "=m"(sem->count), "=d"(tmp)
		: "a"(sem), "1"(tmp), "m"(sem->count)
		: "memory", "cc");
}
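
/*
 * Illustrative C-level equivalent of the write fast path (a sketch only):
 *
 *	old = sem->count;			// read and update done
 *	sem->count += RWSEM_ACTIVE_WRITE_BIAS;	// atomically via LOCK XADD
 *	if (old != 0)				// someone else active or queued
 *		rwsem_down_write_failed(sem);	// queue and sleep until granted the lock
 */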

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long ret = cmpxchg(&sem->count,
				  RWSEM_UNLOCKED_VALUE,
				  RWSEM_ACTIVE_WRITE_BIAS);
	if (ret == RWSEM_UNLOCKED_VALUE)
		return 1;
	return 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
	__asm__ __volatile__(
		"# beginning __up_read\n\t"
LOCK_PREFIX	"  xadd      %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
		"  js        2f\n\t" /* jump if the lock is being waited upon */
		"1:\n\t"
		LOCK_SECTION_START("")
		"2:\n\t"
		"  decw      %%dx\n\t" /* do nothing if there are still outstanding active readers */
		"  jnz       1b\n\t"
		"  pushl     %%ecx\n\t"
		"  call      rwsem_wake\n\t"
		"  popl      %%ecx\n\t"
		"  jmp       1b\n"
		LOCK_SECTION_END
		"# ending __up_read\n"
		: "=m"(sem->count), "=d"(tmp)
		: "a"(sem), "1"(tmp), "m"(sem->count)
		: "memory", "cc");
}
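
/*
 * The slow path above only wakes waiters when the departing reader was the
 * last active locker. Roughly (illustrative sketch, not the real code):
 *
 *	old = sem->count;			// read and update done
 *	new = old - RWSEM_ACTIVE_READ_BIAS;	// atomically via LOCK XADD
 *	sem->count = new;
 *	if (new < 0 &&				// tasks are queued waiting
 *	    (old & RWSEM_ACTIVE_MASK) == 1)	// and we were the last active locker
 *		rwsem_wake(sem);
 */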

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	__asm__ __volatile__(
		"# beginning __up_write\n\t"
		"  movl      %2,%%edx\n\t"
LOCK_PREFIX	"  xaddl     %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
		"  jnz       2f\n\t" /* jump if the lock is being waited upon */
		"1:\n\t"
		LOCK_SECTION_START("")
		"2:\n\t"
		"  decw      %%dx\n\t" /* did the active count reduce to 0? */
		"  jnz       1b\n\t" /* jump back if not */
		"  pushl     %%ecx\n\t"
		"  call      rwsem_wake\n\t"
		"  popl      %%ecx\n\t"
		"  jmp       1b\n"
		LOCK_SECTION_END
		"# ending __up_write\n"
		: "=m"(sem->count)
		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
		: "memory", "cc", "edx");
}
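
/*
 * Similarly for the writer (illustrative sketch): the writer removes its
 * ACTIVE_WRITE_BIAS; if the resulting count is non-zero then other tasks are
 * queued, and since a writer is always the sole active locker, rwsem_wake()
 * is called to pass the lock on:
 *
 *	old = sem->count;			// read and update done
 *	new = old - RWSEM_ACTIVE_WRITE_BIAS;	// atomically via LOCK XADD
 *	sem->count = new;
 *	if (new != 0 && (old & RWSEM_ACTIVE_MASK) == 1)
 *		rwsem_wake(sem);
 */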

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	__asm__ __volatile__(
LOCK_PREFIX	"addl %1,%0"
		: "=m"(sem->count)
		: "ir"(delta), "m"(sem->count));
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	int tmp = delta;

	__asm__ __volatile__(
LOCK_PREFIX	"xadd %0,(%2)"
		: "+r"(tmp), "=m"(sem->count)
		: "r"(sem), "m"(sem->count)
		: "memory");

	return tmp + delta;
}
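
/*
 * Sketch of how a generic slow path can use this primitive (illustrative
 * only; the real callers live outside this header, e.g. in lib/rwsem.c):
 *
 *	// after queueing ourselves on sem->wait_list under sem->wait_lock,
 *	// convert our active bias into a waiting bias:
 *	count = rwsem_atomic_update(RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS, sem);
 *	if ((count & RWSEM_ACTIVE_MASK) == 0)
 *		sem = rwsem_wake(sem);	// no active lockers left, so nobody
 *					// else will perform the wakeup for us
 */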

#endif /* __KERNEL__ */
#endif /* _I386_RWSEM_H */