/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name)						\
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
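
/*
 * Usage sketch (illustrative, not part of this header): a wait queue head
 * can either be defined statically or embedded in a structure and
 * initialised at runtime. The structure and names below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(global_wq);	// file-scope head
 *
 *	struct my_device {
 *		struct wait_queue_head wq;
 *		bool ready;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);	// runtime init, gets a lockdep key
 *		dev->ready = false;
 *	}
 */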

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
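
/*
 * Example (illustrative sketch): a waker that only takes the wakeup slow
 * path when someone is actually sleeping. wq_has_sleeper() supplies the
 * smp_mb() that pairs with the barrier in set_current_state() on the
 * waiter side. The structure and flag below are hypothetical.
 *
 *	struct my_sock {
 *		struct wait_queue_head wq;
 *		bool data_ready;
 *	};
 *
 *	static void my_sock_data_arrived(struct my_sock *sk)
 *	{
 *		sk->data_ready = true;		// publish the condition first
 *		if (wq_has_sleeper(&sk->wq))	// implies smp_mb()
 *			wake_up_interruptible(&sk->wq);
 *	}
 */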

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_poll_on_current_cpu(x, m)				\
	__wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)			\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
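
/*
 * Example (illustrative sketch): a ->poll() provider reporting readiness.
 * The poll key narrows the wakeup to waiters interested in those events
 * (e.g. epoll). my_dev and its fields are hypothetical.
 *
 *	static void my_dev_rx_complete(struct my_dev *dev)
 *	{
 *		dev->rx_pending = true;
 *		// wake waiters that polled for input readiness
 *		wake_up_interruptible_poll(&dev->wq, EPOLLIN | EPOLLRDNORM);
 *	}
 */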

/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons, we don't always take the queue lock here.
	 * Therefore, we might race with someone removing the last entry from
	 * the queue, and proceed while they still hold the queue lock.
	 * However, rcu_read_lock() is required to be held in such cases, so we
	 * can safely proceed with an RCU-delayed free.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_entry __wq_entry;				\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
__out:	__ret;								\
})

#define __wait_event(wq_head, condition)				\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq_head, condition);				\
} while (0)
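
/*
 * Example (illustrative sketch): pairing wait_event() with wake_up().
 * The waiter sleeps uninterruptibly until the flag is set; the waker
 * updates the flag before waking. dev and done are hypothetical.
 *
 *	// waiter
 *	wait_event(dev->wq, dev->done);
 *
 *	// waker
 *	dev->done = true;	// change the condition first...
 *	wake_up(&dev->wq);	// ...then wake the queue
 */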

#define __io_wait_event(wq_head, condition)				\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq_head, condition);				\
} while (0)

#define __wait_event_freezable(wq_head, condition)			\
	___wait_event(wq_head, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), \
		      0, 0, schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq_head, condition);	\
	__ret;								\
})

#define __wait_event_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq_head, condition, timeout); \
	__ret;								\
})
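
/*
 * Example (illustrative sketch): distinguishing timeout from success.
 * A zero return means the condition was still false when the timeout
 * elapsed; any non-zero return means it became true. dev is hypothetical.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(dev->wq, dev->done,
 *				       msecs_to_jiffies(500));
 *	if (!remaining)
 *		return -ETIMEDOUT;	// condition never became true
 *	// condition is true; 'remaining' jiffies were left (at least 1)
 */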

#define __wait_event_freezable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 0, timeout,	\
		      __ret = schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)	\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)	\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);	\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_interruptible(wq_head, condition)			\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq_head, condition);	\
	__ret;								\
})
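
/*
 * Example (illustrative sketch): propagating -ERESTARTSYS so the core
 * can restart the syscall after signal handling. The file operation and
 * dev fields are hypothetical.
 *
 *	static ssize_t my_read(struct file *file, char __user *buf,
 *			       size_t len, loff_t *ppos)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		int ret;
 *
 *		ret = wait_event_interruptible(dev->wq, dev->data_ready);
 *		if (ret)
 *			return ret;	// -ERESTARTSYS: interrupted by a signal
 *		// dev->data_ready is true here; proceed to copy data out
 *	}
 */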

#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq_head,	\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)	\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,		\
				      HRTIMER_MODE_REL);		\
	if ((timeout) != KTIME_MAX) {					\
		hrtimer_set_expires_range_ns(&__t.timer, timeout,	\
					     current->timer_slack_ns);	\
		hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL);	\
	}								\
									\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
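
/*
 * Example (illustrative sketch): a high-resolution bounded wait. The
 * timeout is a ktime_t, so sub-jiffy waits are possible. dev is
 * hypothetical.
 *
 *	int ret;
 *
 *	ret = wait_event_hrtimeout(dev->wq, dev->done, ms_to_ktime(2));
 *	if (ret == -ETIME)
 *		pr_debug("no completion within 2ms\n");
 */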

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret;								\
})

#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})


#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, (TASK_INTERRUPTIBLE|TASK_FREEZABLE), 1, 0,\
		      schedule())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)			\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_IDLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
	__ret;								\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_IDLE, 1, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;								\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({									\
	int __ret;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		__ret = fn(&(wq), &__wait);				\
		if (__ret)						\
			break;						\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held; the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
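
/*
 * Example (illustrative sketch): waiting with wq.lock held, paired with
 * wake_up_locked() on the waker side. dev and its fields are hypothetical.
 *
 *	// waiter
 *	spin_lock(&dev->wq.lock);
 *	ret = wait_event_interruptible_locked(dev->wq, dev->data_ready);
 *	if (!ret)
 *		consume_data_locked(dev);	// still under wq.lock
 *	spin_unlock(&dev->wq.lock);
 *
 *	// waker
 *	spin_lock(&dev->wq.lock);
 *	dev->data_ready = true;
 *	wake_up_locked(&dev->wq);
 *	spin_unlock(&dev->wq.lock);
 */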

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held; the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held; the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this process
 * is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. This spinlock is unlocked while
 * sleeping, but @condition testing is done while the lock is held; the
 * lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this process
 * is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq_head, condition);	\
	__ret;								\
})
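
/*
 * Example (illustrative sketch): a long wait that should not respond to
 * ordinary signals yet must not block SIGKILL (common for filesystem and
 * network waits). req is hypothetical.
 *
 *	if (wait_event_killable(req->wq, req->completed))
 *		return -ERESTARTSYS;	// fatal signal pending
 *	// req->completed is true here
 */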

#define __wait_event_state(wq, condition, state)			\
	___wait_event(wq, condition, state, 0, 0, schedule())

/**
 * wait_event_state - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @state: state to sleep in
 *
 * The process is put to sleep (@state) until the @condition evaluates to true
 * or a signal is received (when allowed by @state). The @condition is checked
 * each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a signal
 * (when allowed by @state) and 0 if @condition evaluated to true.
 */
#define wait_event_state(wq_head, condition, state)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_state(wq_head, condition, state);	\
	__ret;								\
})

#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_killable_timeout(wq_head,		\
						condition, timeout);	\
	__ret;								\
})


#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
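
/*
 * Example (illustrative sketch): the classic driver pattern of waiting
 * for a resource whose state is protected by an irq-safe spinlock. The
 * macro itself drops the lock around schedule(), so the condition is
 * always re-checked under the lock. dev, dev->lock and dev->free_slots
 * are hypothetical.
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, dev->free_slots > 0, dev->lock);
 *	dev->free_slots--;		// safe: condition held under the lock
 *	spin_unlock_irq(&dev->lock);
 */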


#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      state, 0, timeout,				\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_INTERRUPTIBLE);		\
	__ret;								\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_UNINTERRUPTIBLE);		\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
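
/*
 * Example (illustrative sketch): the open-coded wait loop that
 * prepare_to_wait()/finish_wait() support, for cases the wait_event*()
 * macros cannot express. my_wq and my_cond are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		if (signal_pending(current))
 *			break;		// caller turns this into -ERESTARTSYS
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */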

typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);

#endif /* _LINUX_WAIT_H */