/*
 * linux/include/asm-arm/semaphore.h
 */
#ifndef __ASM_ARM_SEMAPHORE_H
#define __ASM_ARM_SEMAPHORE_H

#include <linux/linkage.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/atomic.h>
#include <asm/proc/locks.h>

struct semaphore {
	atomic_t count;			/* current semaphore value */
	int sleepers;			/* bookkeeping for the contended slow path */
	wait_queue_head_t wait;		/* tasks sleeping in down() */
#if WAITQUEUE_DEBUG
	long __magic;			/* sanity value checked by CHECK_MAGIC() */
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name)		\
	, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INIT(name, count)			\
	{ ATOMIC_INIT(count), 0,			\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INIT(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INIT(name, count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
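
/*
 * Example (illustrative sketch, compiled out; not part of the original
 * header): a typical caller declares a mutex-style semaphore at file scope
 * and brackets the critical section with down()/up().  The identifiers
 * below are hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);	/* count starts at 1: free */

static void example_critical_section(void)
{
	down(&example_sem);		/* sleep until the semaphore is free */
	/* ... touch the protected data ... */
	up(&example_sem);		/* release; wakes one sleeper, if any */
}
#endif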

static inline void sema_init(struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
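
/*
 * Example (illustrative, compiled out): a semaphore embedded in a
 * dynamically allocated object cannot use the static DECLARE_* macros,
 * so it is initialised at run time with sema_init()/init_MUTEX().
 * The structure and function names below are hypothetical.
 */
#if 0
struct example_device {
	struct semaphore lock;
	/* ... other fields ... */
};

static void example_device_init(struct example_device *dev)
{
	init_MUTEX(&dev->lock);		/* same as sema_init(&dev->lock, 1) */
}
#endif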

/*
 * special register calling convention
 */
asmlinkage void __down_failed(void);
asmlinkage int __down_interruptible_failed(void);
asmlinkage int __down_trylock_failed(void);
asmlinkage void __up_wakeup(void);

extern void __down(struct semaphore *sem);
extern int __down_interruptible(struct semaphore *sem);
extern int __down_trylock(struct semaphore *sem);
extern void __up(struct semaphore *sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down" is the actual routine that waits...
 */
static inline void down(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__down_op(sem, __down_failed);
}

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_interruptible" is the actual routine that waits...
 */
static inline int down_interruptible(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return __down_op_ret(sem, __down_interruptible_failed);
}
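
/*
 * Example (illustrative, compiled out): down_interruptible() returns 0 once
 * the semaphore has been acquired and -EINTR if the sleep was broken by a
 * signal, so callers must check the return value.  Returning -ERESTARTSYS
 * here is the usual driver convention, not something this header mandates;
 * the function name is hypothetical.
 */
#if 0
static int example_op(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -ERESTARTSYS;	/* interrupted by a signal */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif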

static inline int down_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return __down_op_ret(sem, __down_trylock_failed);
}
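
/*
 * Example (illustrative, compiled out): down_trylock() never sleeps; it
 * returns 0 when the semaphore was acquired and non-zero when it was
 * already held, so it can be attempted from contexts that must not block.
 * The function name is hypothetical.
 */
#if 0
static int example_try(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;		/* already held, give up immediately */
	/* ... short critical section ... */
	up(sem);
	return 0;
}
#endif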

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__up_op(sem, __up_wakeup);
}
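
/*
 * Example (illustrative, compiled out): a semaphore declared with
 * DECLARE_MUTEX_LOCKED starts with count 0, so the first down() sleeps
 * until some other context signals it with up().  The identifiers are
 * hypothetical.
 */
#if 0
static DECLARE_MUTEX_LOCKED(example_done);

static void example_waiter(void)
{
	down(&example_done);		/* blocks until example_signaller() runs */
}

static void example_signaller(void)
{
	up(&example_done);		/* lets the waiter proceed */
}
#endif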

static inline int sem_getcount(struct semaphore *sem)
{
	return atomic_read(&sem->count);
}

#endif