/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996  Linus Torvalds
 * Copyright (C) 1998, 99, 2000, 01  Ralf Baechle
 * Copyright (C) 1999, 2000, 01  Silicon Graphics, Inc.
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 */
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H

#include <linux/compiler.h>
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>

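/*
 * Layout note: count and waking live side by side in one 64-bit
 * aligned doubleword so that lld/scd can update the pair atomically.
 * The #ifdef below orders the fields so that count always occupies
 * the upper 32 bits of that doubleword on both big- and little-endian
 * kernels, which is what the inline assembly in down_trylock() and
 * up() relies on.
 */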
struct semaphore {
#ifdef __MIPSEB__
	atomic_t count;
	atomic_t waking;
#else
	atomic_t waking;
	atomic_t count;
#endif
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
} __attribute__((aligned(8)));

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) , .__magic = (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,_count) {				\
	.count	= ATOMIC_INIT(_count),					\
	.waking	= ATOMIC_INIT(0),					\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)		\
	__SEM_DEBUG_INIT(name)						\
}

#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
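
/*
 * Usage sketch (illustrative only; foo_sem is a hypothetical name):
 *
 *	static DECLARE_MUTEX(foo_sem);		... count starts at 1
 *
 *	down(&foo_sem);
 *	... critical section ...
 *	up(&foo_sem);
 */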

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, 0);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
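
/*
 * Semaphores embedded in dynamically allocated objects cannot use the
 * static DECLARE_* initializers above; initialize them at runtime
 * instead (sketch; struct foo and its sem member are hypothetical):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (f)
 *		sema_init(&f->sem, 1);		... same as init_MUTEX
 */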

#ifndef CONFIG_CPU_HAS_LLDSCD
/*
 * On machines without lld/scd we need a spinlock to make the manipulation of
 * sem->count and sem->waking atomic.
 */
extern spinlock_t semaphore_lock;
#endif

extern void __down_failed(struct semaphore * sem);
extern int  __down_failed_interruptible(struct semaphore * sem);
extern void __up_wakeup(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		__down_failed(sem);
}

/*
 * Interruptible attempt to acquire a semaphore.  If we obtain it,
 * return zero.  If we were interrupted, return -EINTR.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		return __down_failed_interruptible(sem);

	return 0;
}
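
/*
 * Typical call pattern (sketch): propagate the error so a signal
 * aborts the wait instead of leaving the task blocked.
 *
 *	if (down_interruptible(&sem))
 *		return -EINTR;
 */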

#ifdef CONFIG_CPU_HAS_LLDSCD

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Here, we do this by using lld/scd on the pair of 32-bit words.
 *
 * Pseudocode:
 *
 *   Decrement(sem->count)
 *   If(sem->count >= 0) {
 *	Return(SUCCESS)			// resource is free
 *   } else {
 *	If(sem->waking <= 0) {		// if no wakeup pending
 *	   Increment(sem->count)	// undo decrement
 *	   Return(FAILURE)
 *	} else {
 *	   Decrement(sem->waking)	// otherwise "steal" wakeup
 *	   Return(SUCCESS)
 *	}
 *   }
 */
static inline int down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__asm__ __volatile__(
	"	.set	mips3			# down_trylock		\n"
	"0:	lld	%1, %4						\n"
	"	dli	%3, 0x0000000100000000	# count -= 1		\n"
	"	dsubu	%1, %3						\n"
	"	li	%0, 0			# ret = 0		\n"
	"	bgez	%1, 2f			# if count >= 0		\n"
	"	sll	%2, %1, 0		# extract waking	\n"
	"	blez	%2, 1f			# if waking <= 0 -> 1f	\n"
	"	daddiu	%1, %1, -1		# waking -= 1		\n"
	"	b	2f						\n"
	"1:	daddu	%1, %1, %3		# count += 1		\n"
	"	li	%0, 1			# ret = 1		\n"
	"2:	scd	%1, %4						\n"
	"	beqz	%1, 0b						\n"
	"	sync							\n"
	"	.set	mips0						\n"
	: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
	: "m"(*sem)
	: "memory");

	return ret;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
static inline void up(struct semaphore * sem)
{
	unsigned long tmp, tmp2;
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	/*
	 * We must manipulate count and waking simultaneously and atomically.
	 * Otherwise we have races between up and __down_failed_interruptible
	 * waking up on a signal.
	 */

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"	sync			# up			\n"
	"1:	lld	%1, %3					\n"
	"	dsra32	%0, %1, 0	# extract count to %0	\n"
	"	daddiu	%0, 1		# count += 1		\n"
	"	slti	%2, %0, 1	# %2 = (count <= 0)	\n"
	"	daddu	%1, %2		# waking += %2		\n"
	"	dsll32	%1, %1, 0	# zero-extend %1	\n"
	"	dsrl32	%1, %1, 0				\n"
	"	dsll32	%2, %0, 0	# Reassemble union	\n"
	"	or	%1, %2		# from count and waking	\n"
	"	scd	%1, %3					\n"
	"	beqz	%1, 1b					\n"
	"	.set	mips0					\n"
	: "=&r"(count), "=&r"(tmp), "=&r"(tmp2), "+m"(*sem)
	:
	: "memory");

	if (unlikely(count <= 0))
		__up_wakeup(sem);
}

#else

/*
 * Non-blocking attempt to down() a semaphore.  Returns zero if we
 * acquired it, 1 if we failed.
 */
static inline int down_trylock(struct semaphore * sem)
{
	unsigned long flags;
	int count, waking;
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	spin_lock_irqsave(&semaphore_lock, flags);
	count = atomic_read(&sem->count) - 1;
	atomic_set(&sem->count, count);
	if (unlikely(count < 0)) {
		waking = atomic_read(&sem->waking);
		if (waking <= 0) {
			atomic_set(&sem->count, count + 1);
			ret = 1;
		} else {
			atomic_set(&sem->waking, waking - 1);
			ret = 0;
		}
	}
	spin_unlock_irqrestore(&semaphore_lock, flags);

	return ret;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
static inline void up(struct semaphore * sem)
{
	unsigned long flags;
	int count, waking;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	/*
	 * We must manipulate count and waking simultaneously and atomically.
	 * Otherwise we have races between up and __down_failed_interruptible
	 * waking up on a signal.
	 */

	spin_lock_irqsave(&semaphore_lock, flags);
	count = atomic_read(&sem->count) + 1;
	waking = atomic_read(&sem->waking);
	if (count <= 0)
		waking++;
	atomic_set(&sem->count, count);
	atomic_set(&sem->waking, waking);
	spin_unlock_irqrestore(&semaphore_lock, flags);

	if (unlikely(count <= 0))
		__up_wakeup(sem);
}

#endif /* CONFIG_CPU_HAS_LLDSCD */
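
/*
 * Typical call pattern for either down_trylock() variant above
 * (sketch): fail fast instead of sleeping when the semaphore is
 * already held.
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;
 */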

static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}
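
/*
 * Note that sem_getcount() only returns a snapshot; the value may be
 * stale as soon as it is read if other CPUs are concurrently calling
 * up() or down().
 */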

#endif /* _ASM_SEMAPHORE_H */