#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2
#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
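
/*
 * Illustrative sketch (not part of this header), assuming a hypothetical
 * character driver with a wait queue ex_wq and a flag ex_data_ready, and
 * the usual poll machinery from <linux/poll.h>: the driver's poll() method
 * registers ex_wq with poll_wait(), and the code that produces data reports
 * readability with wake_up_interruptible_poll() so pollers are only woken
 * for the events they asked for.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *	static int ex_data_ready;
 *
 *	static unsigned int ex_poll(struct file *file, poll_table *wait)
 *	{
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &ex_wq, wait);
 *		if (ex_data_ready)
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 *	static void ex_data_arrived(void)
 *	{
 *		ex_data_ready = 1;
 *		wake_up_interruptible_poll(&ex_wq, POLLIN | POLLRDNORM);
 *	}
 */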

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
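
/*
 * Illustrative sketch (not part of this header): the basic wait_event() /
 * wake_up() pairing. The producer sets the condition and only then calls
 * wake_up(); the consumer sleeps uninterruptibly until the condition is
 * true. The names ex_wq, ex_ready, ex_produce() and ex_consume() are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *	static int ex_ready;
 *
 *	static void ex_produce(void)
 *	{
 *		ex_ready = 1;
 *		wake_up(&ex_wq);
 *	}
 *
 *	static void ex_consume(void)
 *	{
 *		wait_event(ex_wq, ex_ready);
 *	}
 */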

#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	if (!ret && (condition)) \
		ret = 1; \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})
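
/*
 * Illustrative sketch (not part of this header): using the return value of
 * wait_event_timeout() to tell a timeout apart from the condition becoming
 * true. A zero return means the timeout elapsed with the condition still
 * false; a positive return is the number of jiffies left (at least 1).
 * The names ex_wq, ex_ready and ex_wait_for_data() are hypothetical; HZ
 * jiffies is one second.
 *
 *	static int ex_wait_for_data(void)
 *	{
 *		long left = wait_event_timeout(ex_wq, ex_ready, HZ);
 *
 *		if (!left)
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 */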

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
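
/*
 * Illustrative sketch (not part of this header): an interruptible wait as
 * used in a syscall or ioctl path. The return value is 0 if the condition
 * became true, or -ERESTARTSYS if a signal arrived first; propagating it
 * lets the signal be delivered (or the syscall restarted) instead of
 * blocking forever. The names ex_wq, ex_ready and ex_wait_for_event() are
 * hypothetical.
 *
 *	static int ex_wait_for_event(void)
 *	{
 *		int err = wait_event_interruptible(ex_wq, ex_ready);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */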

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	if (!ret && (condition)) \
		ret = 1; \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					  TASK_INTERRUPTIBLE); \
		if (condition) { \
			finish_wait(&wq, &__wait); \
			break; \
		} \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		abort_exclusive_wait(&wq, &__wait, \
				     TASK_INTERRUPTIBLE, NULL); \
		break; \
	} \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret; \
})
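
/*
 * Illustrative sketch (not part of this header): exclusive waiters give
 * wake-one semantics. If several worker threads block in ex_get_work()
 * below, a single wake_up() wakes only the first exclusive waiter on the
 * queue, avoiding a thundering herd; wake_up_all() would still wake every
 * waiter. The names ex_wq, ex_pending, ex_get_work() and ex_add_work() are
 * hypothetical, and locking around ex_pending is omitted for brevity.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *	static int ex_pending;
 *
 *	static int ex_get_work(void)
 *	{
 *		return wait_event_interruptible_exclusive(ex_wq, ex_pending > 0);
 *	}
 *
 *	static void ex_add_work(void)
 *	{
 *		ex_pending++;
 *		wake_up(&ex_wq);
 *	}
 */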

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
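
/*
 * Illustrative sketch (not part of this header): wait_event_interruptible_locked()
 * is entered with wq.lock already held, and the waker updates the shared
 * state and calls wake_up_locked() under that same lock. The names ex_wq,
 * ex_count, ex_take() and ex_give() are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *	static unsigned int ex_count;
 *
 *	static int ex_take(void)
 *	{
 *		int err;
 *
 *		spin_lock(&ex_wq.lock);
 *		err = wait_event_interruptible_locked(ex_wq, ex_count > 0);
 *		if (!err)
 *			ex_count--;
 *		spin_unlock(&ex_wq.lock);
 *		return err;
 *	}
 *
 *	static void ex_give(void)
 *	{
 *		spin_lock(&ex_wq.lock);
 *		ex_count++;
 *		wake_up_locked(&ex_wq);
 *		spin_unlock(&ex_wq.lock);
 *	}
 */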

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, further exclusive waiters queued behind
 * it are not considered (wake-one semantics).
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, further exclusive waiters queued behind
 * it are not considered (wake-one semantics).
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
						    lock, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		spin_unlock_irq(&lock); \
		ret = schedule_timeout(ret); \
		spin_lock_irq(&lock); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	and reacquired afterwards
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock; the @condition
 * is checked under the lock. The lock is dropped before going to sleep
 * and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq_timeout( \
					wq, condition, lock, __ret); \
	__ret; \
})
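
/*
 * Illustrative sketch (not part of this header): waiting for a device to
 * become idle while its state is protected by a spinlock taken with
 * spin_lock_irq(). Note that the spinlock variable itself (not its
 * address) is passed, matching the macro above. The names ex_lock, ex_wq,
 * ex_busy, ex_wait_idle() and the 5 * HZ timeout are hypothetical.
 *
 *	static DEFINE_SPINLOCK(ex_lock);
 *	static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
 *	static int ex_busy;
 *
 *	static int ex_wait_idle(void)
 *	{
 *		long ret;
 *
 *		spin_lock_irq(&ex_lock);
 *		ret = wait_event_interruptible_lock_irq_timeout(ex_wq,
 *				!ex_busy, ex_lock, 5 * HZ);
 *		spin_unlock_irq(&ex_lock);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 */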

#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})
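
/*
 * Illustrative sketch (not part of this header): wait_event_killable()
 * behaves like an uninterruptible sleep except that a fatal signal (one
 * that will kill the task, e.g. SIGKILL) can still interrupt it, so a hung
 * resource does not leave the task unkillable. The names ex_wq, ex_done
 * and ex_wait_for_completion() are hypothetical.
 *
 *	static int ex_wait_for_completion(void)
 *	{
 *		if (wait_event_killable(ex_wq, ex_done))
 *			return -ERESTARTSYS;
 *		return 0;
 *	}
 */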

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			  unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
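
/*
 * Illustrative sketch (not part of this header): the open-coded wait loop
 * that the wait_event*() macros above are built from, for cases where the
 * check or the bookkeeping between prepare_to_wait() and schedule() needs
 * to be customised. The names ex_wq, ex_ready and ex_wait_open_coded() are
 * hypothetical.
 *
 *	static int ex_wait_open_coded(void)
 *	{
 *		DEFINE_WAIT(wait);
 *		int err = 0;
 *
 *		for (;;) {
 *			prepare_to_wait(&ex_wq, &wait, TASK_INTERRUPTIBLE);
 *			if (ex_ready)
 *				break;
 *			if (signal_pending(current)) {
 *				err = -ERESTARTSYS;
 *				break;
 *			}
 *			schedule();
 *		}
 *		finish_wait(&ex_wq, &wait);
 *		return err;
 *	}
 */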

#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait = { \
			.private	= current, \
			.func		= wake_bit_function, \
			.task_list	= \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
do { \
	(wait)->private = current; \
	(wait)->func = autoremove_wake_function; \
	INIT_LIST_HEAD(&(wait)->task_list); \
	(wait)->flags = 0; \
} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that wait for the bit to clear but
 * have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
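
/*
 * Illustrative sketch (not part of this header): waiting for a "busy" bit
 * in a flags word to clear. The @action callback decides how to sleep; a
 * minimal one just calls schedule(). The barrier before wake_up_bit() is
 * needed so the waker sees any waiter that raced onto the hashed
 * waitqueue; the barrier name assumes a kernel of this vintage. The names
 * ex_flags, EX_BUSY_BIT, ex_bit_wait() and ex_wait_not_busy() are
 * hypothetical.
 *
 *	static unsigned long ex_flags;
 *	#define EX_BUSY_BIT	0
 *
 *	static int ex_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	static void ex_wait_not_busy(void)
 *	{
 *		wait_on_bit(&ex_flags, EX_BUSY_BIT, ex_bit_wait,
 *			    TASK_UNINTERRUPTIBLE);
 *	}
 *
 * Whoever clears the bit must then wake the hashed waitqueue:
 *
 *	clear_bit(EX_BUSY_BIT, &ex_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&ex_flags, EX_BUSY_BIT);
 */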

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * Use wait_on_bit_lock() in threads that wait for the bit to clear
 * with the intention of setting it, and clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
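
/*
 * Illustrative sketch (not part of this header): using a bit as a lock.
 * wait_on_bit_lock() returns with the bit set (the "lock" held) unless the
 * @action callback returned non-zero. The unlock side clears the bit and
 * wakes the hashed waitqueue, with the same barrier caveat as in the
 * previous sketch. The names ex_state, EX_LOCK_BIT, ex_lock(), ex_unlock()
 * and ex_bit_wait() (a schedule()-based action as above) are hypothetical.
 *
 *	static unsigned long ex_state;
 *	#define EX_LOCK_BIT	0
 *
 *	static void ex_lock(void)
 *	{
 *		wait_on_bit_lock(&ex_state, EX_LOCK_BIT, ex_bit_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *	}
 *
 *	static void ex_unlock(void)
 *	{
 *		clear_bit(EX_LOCK_BIT, &ex_state);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&ex_state, EX_LOCK_BIT);
 *	}
 */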

#endif /* __KERNEL__ */

#endif