#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996, 2000 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/* Careful, inline assembly knows about the position of these two.  */
	atomic_t count __attribute__((aligned(8)));
	atomic_t waking;		/* biased by -1 */

	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};
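
/*
 * Layout note (inferred from the ll/sc sequences below): on little-endian
 * Alpha the two counters share one aligned quadword, count in the low 32
 * bits and waking in the high 32 bits, so ldq_l/stq_c can update the pair
 * atomically.
 */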

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name)		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count)		\
	{ ATOMIC_INIT(count), ATOMIC_INIT(-1),		\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
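
/*
 * Usage sketch (illustrative only, not part of this header; the names are
 * made up).  A statically declared mutex-style semaphore protecting a
 * critical section:
 *
 *	static DECLARE_MUTEX(example_sem);
 *
 *	static void example_critical_section(void)
 *	{
 *		down(&example_sem);	// may sleep, so process context only
 *		// ... touch the shared data ...
 *		up(&example_sem);
 *	}
 */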

static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * Logically,
	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc still produces better code when initializing the fields by parts.
	 */

	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, -1);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int  down_interruptible(struct semaphore *);
extern int  __down_failed_interruptible(struct semaphore *);
extern int  down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);

static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}

/*
 * Hidden out of line code is fun, but extremely messy.  Rely on newer
 * compilers to do a respectable job with this.  The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */

static inline void __down(struct semaphore *sem)
{
	long count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		__down_failed(sem);
}

static inline int __down_interruptible(struct semaphore *sem)
{
	long count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		return __down_failed_interruptible(sem);
	return 0;
}
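
/*
 * Usage sketch for the interruptible variant (illustrative only; the
 * function and error choice are made up).  A nonzero return means the
 * sleep was interrupted by a signal and the semaphore was NOT acquired:
 *
 *	static int example_wait_for_resource(void)
 *	{
 *		if (down_interruptible(&example_sem))
 *			return -ERESTARTSYS;	// interrupted, nothing held
 *		// ... use the resource ...
 *		up(&example_sem);
 *		return 0;
 *	}
 */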

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Do this by using ll/sc on the pair of 32-bit words.
 */

static inline int __down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

	/* "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		sub = 0x0000000100000000;
		ret = ((int)tmp <= 0);		// count <= 0 ?
		// Note that if count=0, the decrement overflows into
		// waking, so cancel the 1 loaded above.  Also cancel
		// it if the lock was already free.
		if ((int)tmp >= 0) sub = 0;	// count >= 0 ?
		ret &= ((long)tmp < 0);		// waking < 0 ?
		sub += 1;
		if (ret) break;
		tmp -= sub;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/

	__asm__ __volatile__(
		"1:	ldq_l	%1,%4\n"
		"	lda	%3,1\n"
		"	addl	%1,0,%2\n"
		"	sll	%3,32,%3\n"
		"	cmple	%2,0,%0\n"
		"	cmovge	%2,0,%3\n"
		"	cmplt	%1,0,%2\n"
		"	addq	%3,1,%3\n"
		"	and	%0,%2,%0\n"
		"	bne	%0,2f\n"
		"	subq	%1,%3,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:	mb\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
		: "m"(*sem)
		: "memory");

	return ret;
}
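
/*
 * Usage sketch for the trylock variant (illustrative only; the function is
 * made up).  down_trylock() never sleeps: 0 means the semaphore was taken,
 * 1 means it was busy and nothing was acquired:
 *
 *	static int example_try_to_grab(void)
 *	{
 *		if (down_trylock(&example_sem))
 *			return -EBUSY;		// contended, try again later
 *		// ... short critical section ...
 *		up(&example_sem);
 *		return 0;
 *	}
 */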

static inline void __up(struct semaphore *sem)
{
	long ret, tmp, tmp2, tmp3;

	/* We must manipulate count and waking simultaneously and atomically.
	   Otherwise we have races between up and __down_failed_interruptible
	   waking up on a signal.

	   "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		ret = (int)tmp + 1;			// count += 1;
		tmp2 = tmp & 0xffffffff00000000;	// extract waking
		if (ret <= 0)				// still sleepers?
			tmp2 += 0x0000000100000000;	// waking += 1;
		tmp = ret & 0x00000000ffffffff;		// insert count
		tmp |= tmp2;				// insert waking;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/

	__asm__ __volatile__(
		"	mb\n"
		"1:	ldq_l	%1,%4\n"
		"	addl	%1,1,%0\n"
		"	zapnot	%1,0xf0,%2\n"
		"	addq	%2,%5,%3\n"
		"	cmovle	%0,%3,%2\n"
		"	zapnot	%0,0x0f,%1\n"
		"	bis	%1,%2,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3)
		: "m"(*sem), "r"(0x0000000100000000)
		: "memory");

	if (unlikely(ret <= 0))
		__up_wakeup(sem);
}

#if !WAITQUEUE_DEBUG && !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif

#endif