/*
 * Alpha semaphore implementation.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999, 2000 Richard Henderson
 */

#include <linux/sched.h>


/*
 * Semaphores are implemented using a two-way counter:
 *
 * The "count" variable is decremented for each process that tries to sleep,
 * while the "waking" variable is incremented when the "up()" code goes to
 * wake up waiting processes.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently test
 * if they need to do any extra work (up needs to do something only if count
 * was negative before the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute atomically.
 *
 * When __up() is called, the count was negative before incrementing it,
 * and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to wake up and
 * exit.  ALL waiting processes actually wake up but only the one that gets
 * to the "waking" field first will gate through and acquire the semaphore.
 * The others will go back to sleep.
 *
 * Note that these functions are only called when there is contention on the
 * lock, and as such all this is the "non-critical" part of the whole
 * semaphore business.  The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */
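
/*
 * As a rough sketch of intent only (the real fast paths live in
 * <asm/semaphore.h> as hand-written ll/sc assembly that updates "count"
 * and "waking" together), the inline operations behave roughly like:
 *
 *	down:	if (atomic_dec_return(&sem->count) < 0)
 *			__down_failed(sem);		// slow path below
 *
 *	up:	if (atomic_inc_return(&sem->count) <= 0) {
 *			sem->waking++;			// hand out one wakeup
 *			__up_wakeup(sem);		// slow path below
 *		}
 */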

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative if the task was signalled out of the wait.
 *
 * If called from down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */

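/*
 * For example (illustrative only; "my_sem" is not part of this file),
 * a caller of the interruptible form checks the return value and backs
 * out if the sleep was broken by a signal:
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;	// or propagate the -EINTR
 *	... critical section ...
 *	up(&my_sem);
 */
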
void
__down_failed(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       current->comm, current->pid, sem);
#endif

	current->state = TASK_UNINTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	/* At this point we know that sem->count is negative.  In order
	   to avoid racing with __up, we must check for wakeup before
	   going to sleep the first time.  */

	while (1) {
		long ret, tmp;

		/* An atomic conditional decrement of sem->waking.  */
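		/* "Equivalent" C, in the same spirit as the sketch in
		   __down_failed_interruptible below (illustrative only; the
		   assembly really relies on the stl_c success flag):

			do {
				tmp = ldl_l;		// tmp = sem->waking
				ret = 0;
				if (tmp < 0)		// no wakeup pending
					break;		// ret == 0: sleep again
				ret = tmp - 1;		// consume one wakeup
				ret = stl_c = ret;	// ret = 1 iff store worked
			} while (ret == 0);		// retry on ll/sc failure
		*/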
		__asm__ __volatile__(
			"1:	ldl_l	%1,%2\n"
			"	blt	%1,2f\n"
			"	subl	%1,1,%0\n"
			"	stl_c	%0,%2\n"
			"	beq	%0,3f\n"
			"2:\n"
			".subsection 2\n"
			"3:	br	1b\n"
			".previous"
			: "=r"(ret), "=&r"(tmp), "=m"(sem->waking)
			: "0"(0));

		if (ret)
			break;

		schedule();
		set_task_state(current, TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down acquired(%p)\n",
	       current->comm, current->pid, sem);
#endif
}

int
__down_failed_interruptible(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);
	long ret;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       current->comm, current->pid, sem);
#endif

	current->state = TASK_INTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (1) {
		long tmp, tmp2, tmp3;

		/* We must undo the sem->count down_interruptible decrement
		   simultaneously and atomically with the sem->waking
		   adjustment, otherwise we can race with __up.  This is
		   accomplished by doing a 64-bit ll/sc on two 32-bit words.

		   "Equivalent" C.  Note that we have to do this all without
		   (taken) branches in order to be a valid ll/sc sequence.

		   do {
			tmp = ldq_l;
			ret = 0;
			if (tmp >= 0) {			// waking >= 0
			    tmp += 0xffffffff00000000;	// waking -= 1
			    ret = 1;
			}
			else if (pending) {
			    // count += 1, but since -1 + 1 carries into the
			    // high word, we have to be more careful here.
			    tmp = (tmp & 0xffffffff00000000)
				  | ((tmp + 1) & 0x00000000ffffffff);
			    ret = -EINTR;
			}
			tmp = stq_c = tmp;
		   } while (tmp == 0);
		*/

		__asm__ __volatile__(
			"1:	ldq_l	%1,%4\n"
			"	lda	%0,0\n"
			"	cmovne	%5,%6,%0\n"
			"	addq	%1,1,%2\n"
			"	and	%1,%7,%3\n"
			"	andnot	%2,%7,%2\n"
			"	cmovge	%1,1,%0\n"
			"	or	%3,%2,%2\n"
			"	addq	%1,%7,%3\n"
			"	cmovne	%5,%2,%1\n"
			"	cmovge	%2,%3,%1\n"
			"	stq_c	%1,%4\n"
			"	beq	%1,3f\n"
			"2:\n"
			".subsection 2\n"
			"3:	br	1b\n"
			".previous"
			: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2),
			  "=&r"(tmp3), "=m"(*sem)
			: "r"(signal_pending(current)), "r"(-EINTR),
			  "r"(0xffffffff00000000));

		/* At this point we have ret
			 1	got the lock
			 0	go to sleep
			-EINTR	interrupted  */
		if (ret != 0)
			break;

		schedule();
		set_task_state(current, TASK_INTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
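	/* Pass any wakeup on: if an up() chose us as its one exclusive
	   waiter just as we gave up because of a signal, the wakeup would
	   otherwise be lost.  In the success case the extra wake_up is
	   harmless; the woken task finds no wakeup credit and sleeps
	   again.  */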
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down %s(%p)\n",
	       current->comm, current->pid,
	       (ret < 0 ? "interrupted" : "acquired"), sem);
#endif

	/* Convert "got the lock" to 0==success.  */
	return (ret < 0 ? ret : 0);
}

void
__up_wakeup(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

void
down(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__down(sem);
}

int
down_interruptible(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	return __down_interruptible(sem);
}

int
down_trylock(struct semaphore *sem)
{
	int ret;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	ret = __down_trylock(sem);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down_trylock %s from %p\n",
	       current->comm, current->pid,
	       ret ? "failed" : "acquired",
	       __builtin_return_address(0));
#endif

	return ret;
}
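
/*
 * Callers note (illustrative only; "my_sem" is not from this file):
 * like __down_trylock, this returns 0 when the semaphore was acquired
 * and non-zero when it was not, so the usual pattern is
 *
 *	if (down_trylock(&my_sem))
 *		return -EBUSY;		// contended; do not sleep
 */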

void
up(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): up(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__up(sem);
}