/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic
 * spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 *
 * Trylock by Brian Watson (Brian.J.Watson@compaq.com).
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
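
/*
 * Rough usage sketch (illustrative only, not part of this file's code):
 * callers normally take the lock through the down_read()/up_read() and
 * down_write()/up_write() wrappers declared in <linux/rwsem.h>, which in
 * this configuration call the __down_read(), __down_write(), __up_read()
 * and __up_write() functions defined below.  "example_sem" is just a
 * placeholder name for the sketch.
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	- shared: many readers may hold it
 *	...				- read-side critical section
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	- exclusive: one writer at a time
 *	...				- write-side critical section
 *	up_write(&example_sem);
 */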

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
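
/*
 * sem->activity tracks who currently holds the lock: 0 means it is idle,
 * a positive value counts the readers holding it, and -1 marks a writer.
 * sem->wait_lock protects both the activity count and the FIFO wait_list
 * of rwsem_waiter entries, each of which lives on its waiter's stack.
 */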

#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	if (sem->debug)
		printk("[%d] %s({%d,%d})\n",
		       current->pid, str, sem->activity,
		       list_empty(&sem->wait_list) ? 0 : 1);
}
#endif

/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}

/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having their task
 *   pointers zeroised
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

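	/* a woken task's rwsem_waiter lives on its own stack, so cache the
	 * task pointer and rely on the reference taken in __down_read() or
	 * __down_write(): once waiter->task is cleared (after the barrier)
	 * the woken task may proceed and the waiter must not be touched again
	 */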
	/* try to grant a single write lock if there's a writer at the front
	 * of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		free_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the readers at the front
	 * of the queue
	 */
	woken = 0;
	do {
		list_del(&waiter->list);
		tsk = waiter->task;
		mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		free_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	sem->activity += woken;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	free_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_read");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
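	/* the waker clears waiter.task (after a barrier) before calling
	 * wake_up_process(), so a NULL task pointer means the lock has been
	 * granted to us and the waiter has already been unlinked
	 */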
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_read");
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	int ret = 0;
	unsigned long flags;

	rwsemtrace(sem, "Entering __down_read_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_read_trylock");
	return ret;
}

/*
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an exclusive
 *   lock
 */
void fastcall __down_write(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	rwsemtrace(sem, "Entering __down_write");

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
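	/* as in __down_read(), sleep until the waker clears waiter.task;
	 * meanwhile the queued waiter stops later readers from being granted
	 * the lock (they check list_empty() in __down_read()), so a waiting
	 * writer cannot be starved by a stream of new readers
	 */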
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

 out:
	rwsemtrace(sem, "Leaving __down_write");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	int ret = 0;
	unsigned long flags;

	rwsemtrace(sem, "Entering __down_write_trylock");

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __down_write_trylock");
	return ret;
}

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_read");

	spin_lock_irqsave(&sem->wait_lock, flags);

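	/* only the last reader to leave (activity hits zero) needs to wake
	 * anyone, and if the queue is non-empty at that point its head must
	 * be a writer, since readers only queue behind a waiting writer
	 */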
	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_read");
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering __up_write");

	spin_lock_irqsave(&sem->wait_lock, flags);

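	/* drop the writer's exclusive hold before handing the lock on;
	 * __rwsem_do_wake() will then grant it to the writer or to the run
	 * of readers at the head of the queue
	 */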
	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving __up_write");
}

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif