1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
7  *
 * This implementation of synchronization variables is heavily based on
9  * one done by Steve Lord <lord@sgi.com>
10  *
11  * Paul Cassella <pwc@sgi.com>
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 
19 #include <asm/semaphore.h>
20 #include <asm/hardirq.h>
21 #include <asm/softirq.h>
22 #include <asm/current.h>
23 
24 #include <asm/sn/sv.h>
25 
26 /* Define this to have sv_test() run some simple tests.
27    kernel_thread() must behave as expected when this is called.  */
28 #undef RUN_SV_TEST
29 
30 #define DEBUG
31 
32 /* Set up some macros so sv_wait(), sv_signal(), and sv_broadcast()
33    can sanity check interrupt state on architectures where we know
34    how. */
35 #ifdef DEBUG
36  #define SV_DEBUG_INTERRUPT_STATE
37  #ifdef __mips64
38   #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x1) != 0)
39   #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x1) == 0)
40   #define SV_INTERRUPT_TEST_WORKERS 31
41  #elif defined(__ia64)
42   #define SV_TEST_INTERRUPTS_ENABLED(flags) ((flags & 0x4000) != 0)
43   #define SV_TEST_INTERRUPTS_DISABLED(flags) ((flags & 0x4000) == 0)
44   #define SV_INTERRUPT_TEST_WORKERS 4 /* simulator's slow */
45  #else
46   #undef  SV_DEBUG_INTERRUPT_STATE
47   #define SV_INTERRUPT_TEST_WORKERS 4 /* reasonable? default. */
48  #endif /* __mips64 */
49 #endif /* DEBUG */
50 
51 
52 /* XXX FIXME hack hack hack.  Our mips64 tree is from before the
53    switch to WQ_FLAG_EXCLUSIVE, and our ia64 tree is from after it. */
54 #ifdef TASK_EXCLUSIVE
55   #undef EXCLUSIVE_IN_QUEUE
56 #else
57   #define EXCLUSIVE_IN_QUEUE
58   #define TASK_EXCLUSIVE 0 /* for the set_current_state() in sv_wait() */
59 #endif
60 
61 
/* Acquire the sv's internal spinlock, which protects sv_waiters against
   concurrent sv_wait()/sv_signal()/sv_broadcast(). */
static inline void sv_lock(sv_t *sv) {
	spin_lock(&sv->sv_lock);
}
65 
/* Release the sv's internal spinlock taken by sv_lock(). */
static inline void sv_unlock(sv_t *sv) {
	spin_unlock(&sv->sv_lock);
}
69 
70 /* up() is "extern inline", so we can't pass its address to sv_wait.
71    Use this function's address instead. */
up_wrapper(struct semaphore * sem)72 static void up_wrapper(struct semaphore *sem) {
73 	up(sem);
74 }
75 
76 /* spin_unlock() is sometimes a macro. */
spin_unlock_wrapper(spinlock_t * s)77 static void spin_unlock_wrapper(spinlock_t *s) {
78 	spin_unlock(s);
79 }
80 
81 /* XXX Perhaps sv_wait() should do the switch() each time and avoid
82    the extra indirection and the need for the _wrapper functions? */
83 
/* Record the monitor-lock type (SV_MON_SPIN or SV_MON_SEMA) in
   sv->sv_flags and install the matching unlock function that sv_wait()
   uses to drop the caller's monitor lock.  BUG()s on combinations that
   cannot work (semaphore monitor shared with interrupts or
   bottom-halves) and on an unknown type. */
static inline void sv_set_mon_type(sv_t *sv, int type) {
	switch (type) {
	case SV_MON_SPIN:
		sv->sv_mon_unlock_func =
		  (sv_mon_unlock_func_t)spin_unlock_wrapper;
		break;
	case SV_MON_SEMA:
		sv->sv_mon_unlock_func =
		  (sv_mon_unlock_func_t)up_wrapper;
		/* These combinations are rejected outright -- see the
		   messages below for the rationale. */
		if(sv->sv_flags & SV_INTS) {
			printk(KERN_ERR "sv_set_mon_type: The monitor lock "
			       "cannot be shared with interrupts if it is a "
			       "semaphore!\n");
			BUG();
		}
		if(sv->sv_flags & SV_BHS) {
			printk(KERN_ERR "sv_set_mon_type: The monitor lock "
			       "cannot be shared with bottom-halves if it is "
			       "a semaphore!\n");
			BUG();
		}
		break;
#if 0
	/*
	 * If needed, and will need to think about interrupts.  This
	 * may be needed, for example, if someone wants to use sv's
	 * with something like dev_base; writers need to hold two
	 * locks.
	 */
	/* NOTE(review): this disabled branch references `lock', which is
	   not a parameter of this function -- it would need plumbing from
	   sv_init() before it could be enabled. */
	case SV_MON_CUSTOM:
		{
		struct sv_mon_custom *c = lock;
		sv->sv_mon_unlock_func = c->sv_mon_unlock_func;
		sv->sv_mon_lock        = c->sv_mon_lock;
		break;
		}
#endif

	default:
		printk(KERN_ERR "sv_set_mon_type: unknown type %d (0x%x)! "
		       "(flags 0x%x)\n", type, type, sv->sv_flags);
		BUG();
		break;
	}
	/* The type bits live in sv_flags alongside the order/option bits. */
	sv->sv_flags |= type;
}
130 
/* Record the wakeup order (SV_ORDER_FIFO or SV_ORDER_LIFO) in
   sv->sv_flags, defaulting a zero argument to SV_ORDER_DEFAULT.
   BUG()s on any other value.  Fix: the error message previously ended
   with "! " and no newline, leaving the log line unterminated. */
static inline void sv_set_ord(sv_t *sv, int ord) {
	if (!ord)
		ord = SV_ORDER_DEFAULT;

	if (ord != SV_ORDER_FIFO && ord != SV_ORDER_LIFO) {
		printk(KERN_EMERG "sv_set_ord: unknown order %d (0x%x)!\n",
		       ord, ord);
		BUG();
	}

	sv->sv_flags |= ord;
}
143 
/* Initialize the synchronization variable @sv.
 *
 * @lock:  the monitor lock the caller will hold around sv_wait() etc.;
 *         may be NULL (see note below).
 * @flags: OR of an SV_ORDER_* value, an SV_MON_* value, and option
 *         bits such as SV_INTS / SV_BHS (mutually exclusive).
 */
void sv_init(sv_t *sv, sv_mon_lock_t *lock, int flags)
{
	int order = flags & SV_ORDER_MASK;
	int mon_type = flags & SV_MON_MASK;

	/* Everything that is not an order or monitor-type bit is
	   recorded verbatim.  This must happen before the sv_set_*
	   calls below, which inspect sv_flags. */
	sv->sv_flags = flags & ~(SV_ORDER_MASK | SV_MON_MASK);

	if ((sv->sv_flags & (SV_INTS | SV_BHS)) == (SV_INTS | SV_BHS)) {
	  printk(KERN_ERR "sv_init: do not set both SV_INTS and SV_BHS, only SV_INTS.\n");
	  BUG();
	}

	sv_set_ord(sv, order);
	sv_set_mon_type(sv, mon_type);

	/* A NULL lock means sv_wait_compat() supplies one later, and
	   sv_signal()/sv_broadcast() then ignore this field. */
	sv->sv_mon_lock = lock;

	spin_lock_init(&sv->sv_lock);
	init_waitqueue_head(&sv->sv_waiters);
}
167 
168 /*
169  * The associated lock must be locked on entry.  It is unlocked on return.
170  *
171  * Return values:
172  *
173  * n < 0 : interrupted,  -n jiffies remaining on timeout, or -1 if timeout == 0
174  * n = 0 : timeout expired
175  * n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
176  */
sv_wait(sv_t * sv,int sv_wait_flags,unsigned long timeout)177 signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout)
178 {
179 	DECLARE_WAITQUEUE( wait, current );
180 	unsigned long flags;
181 	signed long ret = 0;
182 
183 #ifdef SV_DEBUG_INTERRUPT_STATE
184 	{
185 	unsigned long flags;
186 	__save_flags(flags);
187 
188 	if(sv->sv_flags & SV_INTS) {
189 		if(SV_TEST_INTERRUPTS_ENABLED(flags)) {
190 			printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
191 			       "enabled (flags: 0x%lx)\n", flags);
192 			BUG();
193 		}
194 	} else {
195 		if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
196 			printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
197 			       "disabled! (flags: 0x%lx)\n", flags);
198 		}
199 	}
200 	}
201 #endif  /* SV_DEBUG_INTERRUPT_STATE */
202 
203 	sv_lock(sv);
204 
205 	sv->sv_mon_unlock_func(sv->sv_mon_lock);
206 
207 	/* Add ourselves to the wait queue and set the state before
208 	 * releasing the sv_lock so as to avoid racing with the
209 	 * wake_up() in sv_signal() and sv_broadcast().
210 	 */
211 
212 	/* don't need the _irqsave part, but there is no wq_write_lock() */
213 	wq_write_lock_irqsave(&sv->sv_waiters.lock, flags);
214 
215 #ifdef EXCLUSIVE_IN_QUEUE
216 	wait.flags |= WQ_FLAG_EXCLUSIVE;
217 #endif
218 
219 	switch(sv->sv_flags & SV_ORDER_MASK) {
220 	case SV_ORDER_FIFO:
221 		__add_wait_queue_tail(&sv->sv_waiters, &wait);
222 		break;
223 	case SV_ORDER_FILO:
224 		__add_wait_queue(&sv->sv_waiters, &wait);
225 		break;
226 	default:
227 		printk(KERN_ERR "sv_wait: unknown order!  (sv: 0x%p, flags: 0x%x)\n",
228 					(void *)sv, sv->sv_flags);
229 		BUG();
230 	}
231 	wq_write_unlock_irqrestore(&sv->sv_waiters.lock, flags);
232 
233 	if(sv_wait_flags & SV_WAIT_SIG)
234 		set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE  );
235 	else
236 		set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);
237 
238 	spin_unlock(&sv->sv_lock);
239 
240 	if(sv->sv_flags & SV_INTS)
241 		local_irq_enable();
242 	else if(sv->sv_flags & SV_BHS)
243 		local_bh_enable();
244 
245 	if (timeout)
246 		ret = schedule_timeout(timeout);
247 	else
248 		schedule();
249 
250 	if(current->state != TASK_RUNNING) /* XXX Is this possible? */ {
251 		printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
252 		       "schedule().\n");
253 		set_current_state(TASK_RUNNING);
254 	}
255 
256 	remove_wait_queue(&sv->sv_waiters, &wait);
257 
258 	/* Return cases:
259 	   - woken by a sv_signal/sv_broadcast
260 	   - woken by a signal
261 	   - woken by timeout expiring
262 	*/
263 
264 	/* XXX This isn't really accurate; we may have been woken
265            before the signal anyway.... */
266 	if(signal_pending(current))
267 		return timeout ? -ret : -1;
268 	return timeout ? ret : 1;
269 }
270 
271 
/*
 * Wake one sv_wait()er on @sv.
 *
 * If interrupts can acquire this lock, they can also acquire the
 * sv_mon_lock, which we must already have to have called this, so
 * interrupts must be disabled already.  If interrupts cannot contend
 * for this lock, we don't have to worry about it.
 */
void sv_signal(sv_t *sv)
{
#ifdef SV_DEBUG_INTERRUPT_STATE
	/* Verify the assumption above for SV_INTS svs. */
	if (sv->sv_flags & SV_INTS) {
		unsigned long cur_flags;

		__save_flags(cur_flags);
		if (SV_TEST_INTERRUPTS_ENABLED(cur_flags))
			printk(KERN_ERR "sv_signal: SV_INTS and "
			       "interrupts enabled! (flags: 0x%lx)\n",
			       cur_flags);
	}
#endif /* SV_DEBUG_INTERRUPT_STATE */

	spin_lock(&sv->sv_lock);
	wake_up(&sv->sv_waiters);
	spin_unlock(&sv->sv_lock);
}
293 
/*
 * Wake every sv_wait()er on @sv.  Same interrupt-state reasoning as
 * sv_signal(): holding the monitor lock on an SV_INTS sv implies
 * interrupts are already off here.
 */
void sv_broadcast(sv_t *sv)
{
#ifdef SV_DEBUG_INTERRUPT_STATE
	if (sv->sv_flags & SV_INTS) {
		unsigned long cur_flags;

		__save_flags(cur_flags);
		if (SV_TEST_INTERRUPTS_ENABLED(cur_flags))
			printk(KERN_ERR "sv_broadcast: SV_INTS and "
			       "interrupts enabled! (flags: 0x%lx)\n",
			       cur_flags);
	}
#endif /* SV_DEBUG_INTERRUPT_STATE */

	spin_lock(&sv->sv_lock);
	wake_up_all(&sv->sv_waiters);
	spin_unlock(&sv->sv_lock);
}
310 
/* Tear down @sv.  Acquires sv_lock and deliberately never releases it:
   the sv is considered dead from here on.  BUG()s if anyone else holds
   the lock at destruction time. */
void sv_destroy(sv_t *sv)
{
	if (spin_trylock(&sv->sv_lock)) {
		/* XXX Check that the waitqueue is empty?
		       Mark the sv destroyed?
		*/
		return;
	}

	printk(KERN_ERR "sv_destroy: someone else has sv 0x%p locked!\n", (void *)sv);
	BUG();
}
322 
323 
324 #ifdef RUN_SV_TEST
325 
326 static DECLARE_MUTEX_LOCKED(talkback);
327 static DECLARE_MUTEX_LOCKED(sem);
328 sv_t sv;
329 sv_t sv_filo;
330 
/* Test 1, waiter side (kernel thread).  @arg is the shared spinlock
   used as the sv's monitor: take it, release sv_test_1_s via `sem',
   then sv_spin_wait() until signalled, and ack completion on
   `talkback'. */
static int sv_test_1_w(void *arg)
{
	printk("sv_test_1_w: acquiring spinlock 0x%p...\n", arg);

	spin_lock((spinlock_t*)arg);
	printk("sv_test_1_w: spinlock acquired, waking sv_test_1_s.\n");

	up(&sem);

	printk("sv_test_1_w: sv_spin_wait()'ing.\n");

	/* Releases the monitor spinlock and blocks until sv_signal(). */
	sv_spin_wait(&sv, arg);

	printk("sv_test_1_w: talkback.\n");
	up(&talkback);

	printk("sv_test_1_w: exiting.\n");
	return 0;
}
350 
/* Test 1, signaller side (kernel thread).  Waits on `sem' until
   sv_test_1_w is parked in sv_spin_wait(), takes the same monitor
   spinlock (@arg), signals the sv, and acks on `talkback'. */
static int sv_test_1_s(void *arg)
{
	printk("sv_test_1_s: waiting for semaphore.\n");
	down(&sem);
	printk("sv_test_1_s: semaphore acquired.  Acquiring spinlock.\n");
	spin_lock((spinlock_t*)arg);
	printk("sv_test_1_s: spinlock acquired.  sv_signaling.\n");
	sv_signal(&sv);
	printk("sv_test_1_s: talkback.\n");
	up(&talkback);
	printk("sv_test_1_s: exiting.\n");
	return 0;

}
365 
366 static int count;
367 static DECLARE_MUTEX(monitor);
368 
/* Test 2, waiter side (kernel thread).  Takes the `monitor' semaphore,
   acks the spawner via `talkback', then sv_sema_wait()s on the sv
   passed in @arg.  On wakeup, acks via `sem'. */
static int sv_test_2_w(void *arg)
{
	int dummy = count++;	/* thread id for log messages.
				   NOTE(review): count++ is not atomic;
				   presumably big_test()'s per-spawn
				   down(&talkback) serializes this --
				   confirm. */
	sv_t *sv = (sv_t *)arg;

	down(&monitor);
	up(&talkback);
	printk("sv_test_2_w: thread %d started, sv_waiting.\n", dummy);
	sv_sema_wait(sv, &monitor);
	printk("sv_test_2_w: thread %d woken, exiting.\n", dummy);
	up(&sem);
	return 0;
}
382 
/* Test 2, first wake-up thread.  Under the `monitor' semaphore:
   sv_signal()s three waiters one at a time (waiting for each ack on
   `sem'), then signals/broadcasts an empty sv to check that wakeups
   with no waiters are harmless, and acks completion on `talkback'. */
static int sv_test_2_s_1(void *arg)
{
	int i;
	sv_t *sv = (sv_t *)arg;

	down(&monitor);
	for(i = 0; i < 3; i++) {
		printk("sv_test_2_s_1: waking one thread.\n");
		sv_signal(sv);
		down(&sem);
	}

	printk("sv_test_2_s_1: signaling and broadcasting again.  Nothing should happen.\n");
	sv_signal(sv);
	sv_broadcast(sv);
	sv_signal(sv);
	sv_broadcast(sv);

	printk("sv_test_2_s_1: talkbacking.\n");
	up(&talkback);
	up(&monitor);
	return 0;
}
406 
sv_test_2_s(void * arg)407 static int sv_test_2_s(void *arg)
408 {
409 	int i;
410 	sv_t *sv = (sv_t *)arg;
411 
412 	down(&monitor);
413 	for(i = 0; i < 3; i++) {
414 		printk("sv_test_2_s: waking one thread (should be %d.)\n", i);
415 		sv_signal(sv);
416 		down(&sem);
417 	}
418 
419 	printk("sv_test_3_s: waking remaining threads with broadcast.\n");
420 	sv_broadcast(sv);
421 	for(; i < 10; i++)
422 		down(&sem);
423 
424 	printk("sv_test_3_s: sending talkback.\n");
425 	up(&talkback);
426 
427 	printk("sv_test_3_s: exiting.\n");
428 	up(&monitor);
429 	return 0;
430 }
431 
432 
/* Drive test 2 against @sv: spawn three waiter threads and wake them
   via sv_test_2_s_1, then spawn ten more and wake them via
   sv_test_2_s.  Each spawn is followed by down(&talkback) so the new
   thread is parked in sv_sema_wait() before the next is started. */
static void big_test(sv_t *sv)
{
	int i;

	count = 0;

	for(i = 0; i < 3; i++) {
		printk("big_test: spawning thread %d.\n", i);
		kernel_thread(sv_test_2_w, sv, 0);
		down(&talkback);
	}

	printk("big_test: spawning first wake-up thread.\n");
	kernel_thread(sv_test_2_s_1, sv, 0);

	down(&talkback);
	printk("big_test: talkback happened.\n");


	/* Second phase: ten waiters (ids 3..12), woken by a mix of
	   sv_signal() and sv_broadcast() in sv_test_2_s. */
	for(i = 3; i < 13; i++) {
		printk("big_test: spawning thread %d.\n", i);
		kernel_thread(sv_test_2_w, sv, 0);
		down(&talkback);
	}

	printk("big_test: spawning wake-up thread.\n");
	kernel_thread(sv_test_2_s, sv, 0);

	down(&talkback);
}
463 
464 sv_t int_test_sv;
465 spinlock_t int_test_spin = SPIN_LOCK_UNLOCKED;
466 int int_test_ready;
467 static int irqtestcount;
468 
interrupt_test_worker(void * unused)469 static int interrupt_test_worker(void *unused)
470 {
471 	int id = ++irqtestcount;
472 	int it = 0;
473 			unsigned long flags, flags2;
474 
475 	printk("ITW: thread %d started.\n", id);
476 
477 	while(1) {
478 		__save_flags(flags2);
479 		if(jiffies % 3) {
480 			printk("ITW %2d %5d: irqsaving          (%lx)\n", id, it, flags2);
481 			spin_lock_irqsave(&int_test_spin, flags);
482 		} else {
483 			printk("ITW %2d %5d: spin_lock_irqing   (%lx)\n", id, it, flags2);
484 			spin_lock_irq(&int_test_spin);
485 		}
486 
487 		__save_flags(flags2);
488 		printk("ITW %2d %5d: locked, sv_waiting (%lx).\n", id, it, flags2);
489 		sv_wait(&int_test_sv, 0, 0);
490 
491 		__save_flags(flags2);
492 		printk("ITW %2d %5d: wait finished      (%lx), pausing\n", id, it, flags2);
493 		set_current_state(TASK_INTERRUPTIBLE);
494 		schedule_timeout(jiffies & 0xf);
495 		if(current->state != TASK_RUNNING)
496 		  printk("ITW:  current->state isn't RUNNING after schedule!\n");
497 		it++;
498 	}
499 }
500 
/* Kick off the never-ending SV_INTS stress test: init int_test_sv as a
   spinlock-monitor sv shared with interrupts, then spawn
   SV_INTERRUPT_TEST_WORKERS worker threads that loop on it forever.
   int_test_ready flags completion of setup to any observer. */
static void interrupt_test(void)
{
	int i;

	printk("interrupt_test: initing sv.\n");
	sv_init(&int_test_sv, &int_test_spin, SV_MON_SPIN | SV_INTS);

	for(i = 0; i < SV_INTERRUPT_TEST_WORKERS; i++) {
		printk("interrupt_test: starting test thread %d.\n", i);
		kernel_thread(interrupt_test_worker, 0, 0);
	}
	printk("interrupt_test: done with init part.\n");
	int_test_ready = 1;
}
515 
/* Self-test entry point (registered via __initcall when RUN_SV_TEST is
   defined).  Runs test 1 on a spinlock-monitor sv, big_test() on FIFO
   and FILO semaphore-monitor svs, then starts the perpetual
   interrupt_test(). */
int sv_test(void)
{
	spinlock_t s = SPIN_LOCK_UNLOCKED;	/* NOTE(review): stack lock handed
						   to child threads; safe only
						   because this function blocks
						   on talkback until both are
						   done with it -- confirm */

	sv_init(&sv, &s, SV_MON_SPIN);
	printk("sv_test: starting sv_test_1_w.\n");
	kernel_thread(sv_test_1_w, &s, 0);
	printk("sv_test: starting sv_test_1_s.\n");
	kernel_thread(sv_test_1_s, &s, 0);

	printk("sv_test: waiting for talkback.\n");
	down(&talkback); down(&talkback);
	printk("sv_test: talkback happened, sv_destroying.\n");
	sv_destroy(&sv);

	count = 0;

	printk("sv_test: beginning big_test on sv.\n");

	sv_init(&sv, &monitor, SV_MON_SEMA);
	big_test(&sv);
	sv_destroy(&sv);

	printk("sv_test: beginning big_test on sv_filo.\n");
	sv_init(&sv_filo, &monitor, SV_MON_SEMA | SV_ORDER_FILO);
	big_test(&sv_filo);
	sv_destroy(&sv_filo);

	interrupt_test();

	printk("sv_test: done.\n");
	return 0;
}
549 
550 __initcall(sv_test);
551 
552 #endif /* RUN_SV_TEST */
553