/*
 * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_RWSEM_H
#define _PPC64_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
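/*
 * The count uses the same encoding as the i386 implementation this was
 * adapted from: each active reader adds RWSEM_ACTIVE_BIAS, an active
 * writer adds RWSEM_ACTIVE_WRITE_BIAS, and queued waiters are accounted
 * for in the upper half via RWSEM_WAITING_BIAS.  The low 16 bits
 * (RWSEM_ACTIVE_MASK) therefore hold the number of active lockers, and
 * a negative count indicates a writer and/or waiting tasks.
 */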
struct rw_semaphore {
	/* XXX this should be able to be an atomic_t  -- paulus */
	signed int		count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

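/*
 * Slow-path entry points, provided by the generic code in lib/rwsem.c;
 * called when a fast path below cannot complete without blocking or
 * without waking queued waiters.
 */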
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * lock for reading
 */
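/*
 * Fast path: atomically bump the active count.  A non-negative result
 * means no writer is active or waiting, so the read lock is held;
 * otherwise fall back to rwsem_down_read_failed() to queue and sleep.
 */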
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) >= 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

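/*
 * Try to take the lock for reading; returns 1 on success, or 0 as soon
 * as the count is seen negative (a writer is active or waiting).
 */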
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
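/*
 * Fast path: add RWSEM_ACTIVE_WRITE_BIAS.  If the result is exactly
 * RWSEM_ACTIVE_WRITE_BIAS the semaphore was previously unlocked and the
 * write lock is now held; otherwise fall back to
 * rwsem_down_write_failed() to queue and sleep.
 */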
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

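/*
 * Try to take the lock for writing; succeeds (returns 1) only if the
 * semaphore was completely unlocked, i.e. the count could be moved from
 * RWSEM_UNLOCKED_VALUE to RWSEM_ACTIVE_WRITE_BIAS in a single cmpxchg.
 */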
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
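/*
 * Drop this reader's RWSEM_ACTIVE_BIAS.  If the result is below -1 and
 * the low 16 bits show no remaining active lockers, tasks are queued
 * waiting, so call rwsem_wake() to hand the lock on.
 */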
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
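/*
 * Drop the writer's RWSEM_ACTIVE_WRITE_BIAS.  A negative result means
 * tasks queued up behind the writer, so call rwsem_wake() to start the
 * next waiter(s).
 */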
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
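/*
 * Helper for the generic slow path: adjust the count when the caller
 * does not need the resulting value.
 */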
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * implement exchange and add functionality
 */
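/*
 * Helper for the generic slow path: adjust the count and return the new
 * value for the caller to inspect.
 */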
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _PPC64_RWSEM_H */