// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken. For example, given a queue [N1, N2, E1, E2] (N =
 * non-exclusive, E = exclusive), a wakeup with nr_exclusive == 1 wakes
 * N1, N2 and E1, then stops.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;
	int remaining = nr_exclusive;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		remaining = __wake_up_common(wq_head, mode, remaining,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);

	return nr_exclusive - remaining;
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state. Returns the number of exclusive
 * tasks that were awakened.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
	      int nr_exclusive, void *key)
{
	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
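
/*
 * A minimal usage sketch, in the spirit of the DEFINE_WAIT_FUNC()
 * example further down in this file ('ev_wq' and 'ev_ready' are
 * hypothetical caller-side names, not part of this file). The
 * wake_up*() macros in <linux/wait.h> funnel into __wake_up() above:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(ev_wq);
 *	static bool ev_ready;
 *
 *	// waiter
 *	wait_event(ev_wq, ev_ready);
 *
 *	// waker
 *	ev_ready = true;
 *	wake_up(&ev_wq);	// __wake_up(&ev_wq, TASK_NORMAL, 1, NULL)
 */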

void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key);
}

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
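
/*
 * Sketch of a typical _sync call site: a producer that is about to
 * block itself (a pipe-style handoff, say) hints that the woken reader
 * need not be pushed to another CPU ('pipe_wq' is a hypothetical name,
 * not part of this file):
 *
 *	wake_up_interruptible_sync_poll(&pipe_wq, EPOLLIN);
 *	// expands to __wake_up_sync_key(&pipe_wq, TASK_INTERRUPTIBLE,
 *	//				 poll_to_key(EPOLLIN))
 */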

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
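
/*
 * A minimal sketch of the classic open-coded wait loop these helpers
 * support ('my_wq' and 'condition' are hypothetical caller-side names,
 * not part of this file):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */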

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
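
/*
 * Sketch: waiters queued via prepare_to_wait_exclusive() sit at the
 * tail with WQ_FLAG_EXCLUSIVE set, so a plain wake_up() (nr_exclusive
 * == 1) wakes only the first of them and avoids a thundering herd
 * ('my_wq' and 'condition' are hypothetical names):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait_exclusive(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */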

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * this point cannot see us on the queue; it should wake up
		 * another exclusive waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
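
/*
 * A rough sketch of how the wait_event_interruptible() macro family in
 * <linux/wait.h> drives this function (simplified from ___wait_event();
 * the cmd step and most variants are omitted):
 *
 *	struct wait_queue_entry __wq_entry;
 *	long __ret = 0;
 *
 *	init_wait_entry(&__wq_entry, 0);
 *	for (;;) {
 *		long __int = prepare_to_wait_event(&wq_head, &__wq_entry,
 *						   TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (__int) {		// -ERESTARTSYS: signal pending
 *			__ret = __int;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &__wq_entry);
 */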

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
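
/*
 * These two back the wait_event_interruptible_locked*() macros in
 * <linux/wait.h>, which are entered with wq->lock already held.
 * Sketch ('my_wq' and 'condition' are hypothetical names):
 *
 *	int err;
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, condition);
 *	// wq->lock is held again here, whether err is 0 or -ERESTARTSYS
 *	spin_unlock(&my_wq.lock);
 */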

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPUs that we haven't seen yet (and that might
	 *   still change the stack area).
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
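
/*
 * DEFINE_WAIT() in <linux/wait.h> installs autoremove_wake_function()
 * as the wake callback, so a successfully woken waiter has already
 * dropped off the queue and finish_wait()'s list_del_init() is a no-op
 * in the common case.
 */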

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()                       // in woken_wake_function()
 *
 *     p->state = mode;                         wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A                           try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))     <full barrier>
 *         schedule()                              if (p->state & mode)
 *     p->state = TASK_RUNNING;                       p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;       ~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B                           condition = true;
 * }                                            smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);          wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);