// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair, but writers, which should be avoided in
 * RT tasks (think mmap_sem), are at least subject to the rtmutex
 * priority/DL inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one by one reader boosting/handover mechanism is a
 * major surgery for a very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
 * semantics for rwbase_rt. The atomic ops should thus provide _acquire()
 * and _release() (or stronger) ordering themselves.
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
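
/*
 * Note: this file is not a standalone translation unit. It is included by
 * the RT rw_semaphore and rwlock implementations, which are expected to
 * supply the rwbase_rtmutex_*(), rwbase_schedule() and related wrappers
 * used below (a description of the usual setup, not something this file
 * enforces).
 *
 * Reader count encoding, assuming the customary READER_BIAS/WRITER_BIAS
 * definitions from the rwbase_rt header (orientation sketch only):
 *
 *   readers == READER_BIAS		unlocked, reader fast path enabled
 *   READER_BIAS < readers < 0		readers active, no writer contending
 *   0 <= readers < WRITER_BIAS		a writer removed the BIAS and waits
 *					for the remaining readers to drain
 *   readers == WRITER_BIAS		write locked
 */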

static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
	int r;

	/*
	 * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
	 * set.
	 */
	for (r = atomic_read(&rwb->readers); r < 0;) {
		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
			return 1;
	}
	return 0;
}

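/*
 * Reader slow path: taken when the reader BIAS is not set, i.e. a writer
 * holds or waits for the lock. Blocks on the rtmutex until the write side
 * is done, then takes a reader reference and drops the rtmutex again.
 */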
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
				      unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);

	/*
	 * Call into the slow lock path with the rtmutex->wait_lock
	 * held, so this can't result in the following race:
	 *
	 * Reader1                Reader2                Writer
	 *                        down_read()
	 *                                               down_write()
	 *                                               rtmutex_lock(m)
	 *                                               wait()
	 * down_read()
	 * unlock(m->wait_lock)
	 *                        up_read()
	 *                        wake(Writer)
	 *                                               lock(m->wait_lock)
	 *                                               sem->writelocked=true
	 *                                               unlock(m->wait_lock)
	 *
	 *                                               up_write()
	 *                                               sem->writelocked=false
	 *                                               rtmutex_unlock(m)
	 *                        down_read()
	 *                                               down_write()
	 *                                               rtmutex_lock(m)
	 *                                               wait()
	 * rtmutex_lock(m)
	 *
	 * That would put Reader1 behind the writer waiting on
	 * Reader2 to call up_read(), which might be unbounded.
	 */

	trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

	/*
	 * For rwlocks this returns 0 unconditionally, so the below
	 * !ret conditionals are optimized out.
	 */
	ret = rwbase_rtmutex_slowlock_locked(rtm, state);

	/*
	 * On success the rtmutex is held, so there can't be a writer
	 * active. Increment the reader count and immediately drop the
	 * rtmutex again.
	 *
	 * rtmutex->wait_lock has to be unlocked in any case of course.
	 */
	if (!ret)
		atomic_inc(&rwb->readers);
	raw_spin_unlock_irq(&rtm->wait_lock);
	if (!ret)
		rwbase_rtmutex_unlock(rtm);

	trace_contention_end(rwb, ret);
	return ret;
}

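/*
 * Read lock entry point: try the atomic fast path first, fall back to the
 * rtmutex based slow path if the reader BIAS has been removed.
 */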
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
					    unsigned int state)
{
	if (rwbase_read_trylock(rwb))
		return 0;

	return __rwbase_read_lock(rwb, state);
}

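/*
 * Read side unlock slow path: called by the last reader leaving the
 * critical section while a writer owns the rtmutex and waits for the
 * reader count to reach zero.
 */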
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
					 unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	struct task_struct *owner;
	DEFINE_RT_WAKE_Q(wqh);

	raw_spin_lock_irq(&rtm->wait_lock);
	/*
	 * Wake the writer, i.e. the rtmutex owner. It might release the
	 * rtmutex concurrently in the fast path (due to a signal), but to
	 * clean up rwb->readers it needs to acquire rtm->wait_lock. The
	 * worst case which can happen is a spurious wakeup.
	 */
	owner = rt_mutex_owner(rtm);
	if (owner)
		rt_mutex_wake_q_add_task(&wqh, owner, state);

	/* Pairs with the preempt_enable() in rt_mutex_wake_up_q() */
	preempt_disable();
	raw_spin_unlock_irq(&rtm->wait_lock);
	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
					       unsigned int state)
{
	/*
	 * rwb->readers can only hit 0 when a writer is waiting for the
	 * active readers to leave the critical section.
	 *
	 * dec_and_test() is fully ordered, provides RELEASE.
	 */
	if (unlikely(atomic_dec_and_test(&rwb->readers)))
		__rwbase_read_unlock(rwb, state);
}

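/*
 * Common write side unlock helper. @bias selects the resulting reader
 * count: WRITER_BIAS restores the plain unlocked state, WRITER_BIAS - 1
 * additionally accounts the current task as a reader (downgrade), and 0
 * merely puts the reader BIAS back when a write lock attempt is aborted.
 */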
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
					 unsigned long flags)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;

	/*
	 * _release() is needed in case a reader is in the fast path, pairing
	 * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
	 */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	__rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

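/*
 * Downgrade a held write lock to a read lock. Passing WRITER_BIAS - 1 as
 * bias makes __rwbase_write_unlock() restore the reader BIAS plus one
 * reader, so the current task stays accounted as a reader.
 */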
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	/* Release it and account current as reader */
	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

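/*
 * Attempt to mark the lock write locked. The caller holds rtmutex::wait_lock
 * and has already removed the reader BIAS, so a reader count of zero means
 * all readers have left the critical section.
 */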
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
	/* Can do without CAS because we're serialized by wait_lock. */
	lockdep_assert_held(&rwb->rtmutex.wait_lock);

	/*
	 * _acquire is needed in case the reader is in the fast path, pairing
	 * with rwbase_read_unlock(), provides ACQUIRE.
	 */
	if (!atomic_read_acquire(&rwb->readers)) {
		atomic_set(&rwb->readers, WRITER_BIAS);
		return 1;
	}

	return 0;
}

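/*
 * Write lock slow path, following steps 1-4 of the down_write()/write_lock()
 * description above: lock the rtmutex, remove the reader BIAS, then wait for
 * the reader count to drop to zero before marking the lock write locked.
 */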
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
				     unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	/* Take the rtmutex as a first step */
	if (rwbase_rtmutex_lock_state(rtm, state))
		return -EINTR;

	/* Force readers into slow path */
	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb))
		goto out_unlock;

	rwbase_set_and_save_current_state(state);
	trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
	for (;;) {
		/* Optimized out for rwlocks */
		if (rwbase_signal_pending_state(state, current)) {
			rwbase_restore_current_state();
			__rwbase_write_unlock(rwb, 0, flags);
			trace_contention_end(rwb, -EINTR);
			return -EINTR;
		}

		if (__rwbase_write_trylock(rwb))
			break;

		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		rwbase_schedule();
		raw_spin_lock_irqsave(&rtm->wait_lock, flags);

		set_current_state(state);
	}
	rwbase_restore_current_state();
	trace_contention_end(rwb, 0);

out_unlock:
	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
	return 0;
}

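/*
 * Write trylock: succeeds only if the rtmutex can be acquired without
 * blocking and no readers are active. On failure the reader BIAS is
 * restored and the rtmutex dropped again via __rwbase_write_unlock().
 */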
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	unsigned long flags;

	if (!rwbase_rtmutex_trylock(rtm))
		return 0;

	atomic_sub(READER_BIAS, &rwb->readers);

	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
	if (__rwbase_write_trylock(rwb)) {
		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
		return 1;
	}
	__rwbase_write_unlock(rwb, 0, flags);
	return 0;
}