/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

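/* A sketch of the count layout this file relies on (these are the i386 values
 * from include/asm-i386/rwsem.h; other architectures may differ):
 *
 *	RWSEM_ACTIVE_MASK	0x0000ffff	mask for the active (granted) count
 *	RWSEM_ACTIVE_BIAS	0x00000001	one active lock holder
 *	RWSEM_WAITING_BIAS	(-0x00010000)	one queued waiter
 *
 * For example, count == 0x00000001 is a single uncontended reader, while
 * count == 0xffff0001 is one active holder plus one queued waiter.
 */
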
struct rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
	unsigned int		flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};

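/* Each sleeper queues one of these on its own kernel stack (see
 * rwsem_down_failed_common() below). The waker hands over the lock by zeroing
 * ->task, after which the waiter block may vanish at any moment as the woken
 * task returns from its slow path.
 */
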
#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
	printk("sem=%p\n", sem);
	printk("(sem)=%08lx\n", sem->count);
	if (sem->debug)
		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
}
#endif

/*
 * handle the lock being released whilst there are processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having their task pointers zeroed
 */
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long oldcount, woken, loop;

	rwsemtrace(sem, "Entering __rwsem_do_wake");

	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
 try_again:
	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) - RWSEM_ACTIVE_BIAS;
	if (oldcount & RWSEM_ACTIVE_MASK)
		goto undo;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	/* try to grant a single write lock if there's a writer at the front of the queue
	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
	 *   incremented by 0x00010000
	 */
	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
		goto readers_only;

	list_del(&waiter->list);
	tsk = waiter->task;
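	/* the barrier pairs with the busy-wait in rwsem_down_failed_common():
	 * all our reads of the waiter block must complete before the NULL
	 * assignment below becomes visible, since the woken task may free its
	 * stack-based waiter the moment it sees ->task cleared */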
	mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	free_task_struct(tsk);
	goto out;

	/* grant read locks to all the readers at the front of the queue
	 * - note we increment the 'active part' of the count by the number of readers (less
	 *   one for the active count we've already incremented at try_again) before waking
	 *   any processes up
	 */
 readers_only:
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next, struct rwsem_waiter, list);

	} while (waiter->flags & RWSEM_WAITING_FOR_READ);

	loop = woken;
	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
	woken -= RWSEM_ACTIVE_BIAS;
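	/* worked example with the i386 biases (RWSEM_ACTIVE_BIAS 0x00000001,
	 * RWSEM_WAITING_BIAS -0x00010000): waking two readers gives
	 * woken = 2 * 0x00010001 - 1 = 0x00020001; adding that, on top of the
	 * bias already gained at try_again, leaves two active counts on and
	 * cancels the two waiting biases those readers contributed */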
	rwsem_atomic_add(woken, sem);

	next = sem->wait_list.next;
	for (; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		free_task_struct(tsk);
	}

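	/* detach the whole run of woken waiters from the queue in one go */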
	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	rwsemtrace(sem, "Leaving __rwsem_do_wake");
	return sem;

	/* undo the change to count, but check for a transition 1->0 */
 undo:
	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
		goto out;
	goto try_again;
}

/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
							    struct rwsem_waiter *waiter,
							    signed long adjustment)
{
	struct task_struct *tsk = current;
	signed long count;

	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	spin_lock_irq(&sem->wait_lock);
	waiter->task = tsk;
	get_task_struct(tsk);
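	/* the reference taken above keeps tsk's task_struct alive until the
	 * waker has finished with it; __rwsem_do_wake() drops it again with
	 * free_task_struct() after wake_up_process() */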

	list_add_tail(&waiter->list, &sem->wait_list);

	/* note that we're now waiting on the lock, but no longer actively read-locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* if there are no longer active locks, wake the front queued process(es) up
	 * - it might even be this process, since the waker takes a more active part
	 */
	if (!(count & RWSEM_ACTIVE_MASK))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter->task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_read_failed");

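	/* a note on the adjustment, assuming the usual arch fast path (e.g.
	 * i386's __down_read): that path has already speculatively added
	 * RWSEM_ACTIVE_BIAS before trapping into here, so the adjustment
	 * passed below swaps that for a RWSEM_WAITING_BIAS contribution */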
	waiter.flags = RWSEM_WAITING_FOR_READ;
	rwsem_down_failed_common(sem, &waiter, RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
	return sem;
}

/*
 * wait for the write lock to be granted
 */
struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;

	rwsemtrace(sem, "Entering rwsem_down_write_failed");

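	/* again assuming the i386-style fast path: __down_write has already
	 * added RWSEM_ACTIVE_WRITE_BIAS (active bias plus waiting bias), so
	 * subtracting just the active bias below leaves our waiting
	 * contribution in place while we sleep */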
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
	return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read has decremented the active part of the count if we come here
 */
struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	rwsemtrace(sem, "Entering rwsem_wake");

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	rwsemtrace(sem, "Leaving rwsem_wake");

	return sem;
}
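
/* For reference, the architecture fast paths that fall back to the slow paths
 * above look roughly like this (a sketch of the i386 scheme; the real code in
 * include/asm-i386/rwsem.h is inline assembly, and other architectures may
 * use different biases):
 *
 *	down_read:	atomically add RWSEM_ACTIVE_READ_BIAS to sem->count;
 *			if the result went negative, call rwsem_down_read_failed()
 *	down_write:	atomically add RWSEM_ACTIVE_WRITE_BIAS to sem->count;
 *			if the count was previously non-zero, call
 *			rwsem_down_write_failed()
 *	up_read:	atomically subtract RWSEM_ACTIVE_READ_BIAS;
 *			if the result is negative and the active part has hit
 *			zero, call rwsem_wake()
 */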

EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif